/*
 * sd_callback2 -- proc_iterate() callback used during system shutdown.
 *
 * Marks the process with the new shutdown state and, unless it is
 * already a zombie, posts the shutdown signal (sd->signo) to it.
 * When sd->countproc is set, the process is also tagged with
 * P_LIST_EXITCOUNT and counted in proc_shutdown_exitcount so the
 * shutdown path can later wait for these processes to exit.
 *
 * Returns PROC_RETURNED so the iterator continues with the next proc.
 */
static int
sd_callback2(proc_t p, void * args)
{
	struct sd_iterargs * sd = (struct sd_iterargs *)args;
	int signo = sd->signo;
	int setsdstate = sd->setsdstate;
	int countproc = sd->countproc;

	proc_lock(p);
	p->p_shutdownstate = setsdstate;
	if (p->p_stat != SZOMB) {
		proc_unlock(p);
		/*
		 * Tag and count the proc before signalling it; the proc lock
		 * is dropped first so the proc list lock can be taken.
		 */
		if (countproc != 0) {
			proc_list_lock();
			p->p_listflag |= P_LIST_EXITCOUNT;
			proc_shutdown_exitcount++;
			proc_list_unlock();
		}

		psignal(p, signo);
		if (countproc != 0)
			sd->activecount++;
	} else
		proc_unlock(p);

	return(PROC_RETURNED);
}
/*
 * Poll the registered input fds and dispatch any ready handlers.
 *
 * Returns poll()'s result (<= 0) on timeout or error, 1 if the poll
 * was interrupted via the internal "kick" pipe, and 0 after all ready
 * fds have been handled.  Handlers run with the processor lock held.
 */
static inline int proc_poll_inputs (zebra_processor_t *proc,
                                    poll_desc_t *p,
                                    int timeout)
{
    assert(p->num);
    int rc = poll(p->fds, p->num, timeout);
    if(rc <= 0)
        /* FIXME detect and handle fatal errors (somehow) */
        return(rc);
    (void)proc_lock(proc);
    if(p->fds[0].revents && p->fds[0].fd == proc->kick_fds[0]) {
        unsigned junk[2];
        /* drain the kick pipe.  NOTE(review): the read() result is
         * ignored -- presumably best-effort; confirm short reads
         * cannot leave stale kicks pending */
        read(proc->kick_fds[0], junk, 2 * sizeof(unsigned));
        proc_unlock(proc);
        return(1);
    }
    int i;
    for(i = 0; i < p->num; i++)
        if(p->fds[i].revents) {
            if(p->handlers[i])
                p->handlers[i](proc, i);
            p->fds[i].revents = 0; /* debug */
            rc--;
        }
    proc_unlock(proc);
    /* every fd poll() reported ready should have been accounted for */
    assert(!rc);
    return(0);
}
/*
 * Pulse handler invoked when a process is stopped or continued.
 *
 * Looks up the affected process (pid carried in the pulse value) and
 * its parent, then walks the parent's wait queue so that any waiters
 * blocked in wait*() with WSTOPPED/WCONTINUED are notified.  Satisfied
 * wait entries are unlinked and freed.  Always returns 0.
 */
int procmgr_stop_or_cont(message_context_t *ctp, int code, unsigned flags, void *handle) {
	union sigval value = ctp->msg->pulse.value;
	PROCESS *prp;
	PROCESS *parent;
	int type;
	int status;
	struct wait_entry *wap, **pwap;

	/* ignore pulses that did not arrive over our own connection */
	if (procmgr_scoid != ctp->msg->pulse.scoid) {
		return 0;
	}

	if((prp = proc_lock_pid(value.sival_int))) {
		/* lock the parent as well so its wait queue can be edited */
		if((parent = proc_lock_pid(prp->parent->pid))) {
			type = (code == PROC_CODE_CONT) ? WCONTINUED : WSTOPPED;
			for(pwap = &parent->wap; (wap = *pwap);) {
				if((status = procmgr_wait_check(prp, parent, wap, type)) > 0) {
					/* waiter satisfied: unlink and free its entry */
					*pwap = wap->next;
					proc_object_free(&wait_souls, wap);
					if(status == type) {
						break;
					}
				} else {
					pwap = &wap->next;
				}
			}
			proc_unlock(parent);
		}
		proc_unlock(prp);
	}
	return 0;
}
/*
 * Capture and scan video frames until a barcode is decoded or the
 * timeout expires.
 *
 * Enables the result cache and the video stream, registers the video
 * fd for polling, then blocks waiting for EVENT_OUTPUT.  The stream is
 * torn down again before returning.  Returns -1 if locking fails or no
 * video source is configured.
 *
 * NOTE(review): rc from the first zebra_video_enable() call is
 * overwritten by the disable call below, so an enable failure is
 * silently lost (see the FIXME).
 */
int zebra_process_one (zebra_processor_t *proc, int timeout)
{
    if(proc_lock(proc) < 0)
        return(-1);
    int rc = 0;
    if(proc->video) {
        zebra_image_scanner_enable_cache(proc->scanner, 1);
        rc = zebra_video_enable(proc->video, 1);
        /* FIXME failure recovery? */
        int vid_fd = zebra_video_get_fd(proc->video);
        if(vid_fd >= 0)
            add_poll(proc, vid_fd, proc_video_handler);
        proc->active = 1;
#ifdef HAVE_LIBPTHREAD
        /* wake any threads waiting for the processor to become active */
        pthread_cond_broadcast(&proc->event);
#endif
        proc_event_wait(proc, EVENT_OUTPUT, timeout);
        rc = zebra_video_enable(proc->video, 0);
        if(vid_fd >= 0)
            remove_poll(proc, vid_fd);
        proc->active = 0;
        proc->events &= ~EVENT_INPUT;
        zebra_image_scanner_enable_cache(proc->scanner, 0);
    }
    else
        rc = -1;
    proc_unlock(proc);
    return(rc);
}
/*
 * Allow a process to continue running despite an invalid code
 * signature.
 *
 * Only takes effect when a MAC policy implements the
 * mac_proc_check_run_cs_invalid hook (guaranteed under
 * CONFIG_ENFORCE_SIGNED_CODE); otherwise the kill bits would be
 * cleared unconditionally.  On success the CS_KILL/CS_HARD bits are
 * cleared and VM map protection switching is disabled for the task.
 *
 * Returns nonzero if the process may now run with an invalid
 * signature (both CS_KILL and CS_HARD clear).
 */
int
cs_allow_invalid(struct proc *p)
{
#if MACH_ASSERT
	lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_NOTOWNED);
#endif
#if CONFIG_MACF && CONFIG_ENFORCE_SIGNED_CODE
	/* There needs to be a MAC policy to implement this hook, or else the
	 * kill bits will be cleared here every time. If we have
	 * CONFIG_ENFORCE_SIGNED_CODE, we can assume there is a policy
	 * implementing the hook.
	 */
	if( 0 != mac_proc_check_run_cs_invalid(p)) {
		if(cs_debug) printf("CODE SIGNING: cs_allow_invalid() "
				    "not allowed: pid %d\n",
				    p->p_pid);
		return 0;
	}
	if(cs_debug) printf("CODE SIGNING: cs_allow_invalid() "
			    "allowed: pid %d\n",
			    p->p_pid);
	proc_lock(p);
	p->p_csflags &= ~(CS_KILL | CS_HARD);
	proc_unlock(p);
	vm_map_switch_protect(get_task_map(p->task), FALSE);
#endif
	/* NOTE(review): p_csflags is read here without the proc lock */
	return (p->p_csflags & (CS_KILL | CS_HARD)) == 0;
}
/*
 * Enable or disable video capture/scanning for the processor.
 *
 * Toggles the result cache and video stream, and (un)registers the
 * video fd with the internal poll set.  Returns the result of
 * zebra_video_enable(), or -1 if locking fails or no video source is
 * configured.
 *
 * NOTE(review): in the threaded case the processor lock is released by
 * the inlined sem/condvar sequence below; presumably proc_lock/unlock
 * are no-ops when unthreaded -- confirm, otherwise the lock is leaked
 * on this path.
 */
int zebra_processor_set_active (zebra_processor_t *proc,
                                int active)
{
    if(proc_lock(proc) < 0)
        return(-1);
    if(!proc->video) {
        proc_unlock(proc);
        return(-1);
    }
    zebra_image_scanner_enable_cache(proc->scanner, active);
    int rc = zebra_video_enable(proc->video, active);
    int vid_fd = zebra_video_get_fd(proc->video);
    if(vid_fd >= 0) {
        if(active)
            add_poll(proc, vid_fd, proc_video_handler);
        else
            remove_poll(proc, vid_fd);
    }
    /* FIXME failure recovery? */
    proc->active = active;
    proc->events &= ~EVENT_INPUT;
#ifdef HAVE_LIBPTHREAD
    if(proc->threaded) {
        assert(!proc->sem);
        assert(pthread_equal(proc->sem_owner, pthread_self()));
        /* release the processor "semaphore" and wake all waiters */
        pthread_mutex_lock(&proc->mutex);
        proc->sem++;
        pthread_cond_broadcast(&proc->event);
        pthread_cond_signal(&proc->cond);
        pthread_mutex_unlock(&proc->mutex);
    }
#endif
    return(rc);
}
/* Report whether the processor's display window is currently shown.
 * Returns 1 if a window exists and is visible, 0 otherwise, or -1 if
 * the processor lock cannot be acquired. */
int zebra_processor_is_visible (zebra_processor_t *proc)
{
    if(proc_lock(proc) < 0)
        return(-1);

    int shown = 0;
    if(proc->window && proc->visible)
        shown = 1;

    proc_unlock(proc);
    return(shown);
}
/*
 * proc_vfork_begin
 *
 * Description:	start a vfork on a process
 *
 * Parameters:	parent_proc		process (re)entering vfork state
 *
 * Returns:	(void)
 *
 * Notes:	Although this function increments a count, a count in
 *		excess of 1 is not currently supported.  According to the
 *		POSIX standard, calling anything other than execve() or
 *		_exit() following a vfork(), including calling vfork()
 *		itself again, will result in undefined behaviour
 */
void
proc_vfork_begin(proc_t parent_proc)
{
	proc_lock(parent_proc);
	/* mark the parent as being in vfork and bump the nesting count */
	parent_proc->p_lflag |= P_LVFORK;
	parent_proc->p_vforkcnt++;
	proc_unlock(parent_proc);
}
/*
 * hipe_bifs_show_pcb_1 -- debug BIF: print the PCB of the given pid.
 *
 * Looks up the target process, taking all process locks on it, while
 * the caller holds its own main lock; prints the PCB and releases the
 * target.  Fails with BADARG if the pid does not resolve to a live
 * process.
 */
BIF_RETTYPE hipe_bifs_show_pcb_1(BIF_ALIST_1)
{
    Process *rp = erts_pid2proc(BIF_P, ERTS_PROC_LOCK_MAIN,
				BIF_ARG_1, ERTS_PROC_LOCKS_ALL);
    if (!rp)
	BIF_ERROR(BIF_P, BADARG);
    hipe_print_pcb(rp);
    proc_unlock(BIF_P, rp);
    BIF_RET(am_true);
}
/*
 * Kernel unblock pulse for a process blocked in wait().
 *
 * Locates the waiting process (or the network manager for remote
 * waiters), verifies that an unblock was actually requested for this
 * rcvid, and -- unless the process is loading or terminating -- removes
 * the matching wait entry, unbinds the resmgr id when the queue
 * drains, and fails the blocked wait with EINTR.
 */
static int wait_unblock(resmgr_context_t *ctp, io_pulse_t *msg, void *ocb) {
	PROCESS *prp;
	struct _msg_info info;
	pid_t pid;

	pid = ctp->info.pid;
	if(ND_NODE_CMP(ctp->info.nd, ND_LOCAL_NODE) != 0) {
		/* unblock for wait over network */
		pid = pathmgr_netmgr_pid();
		if(pid == 0) {
			/* netmgr is gone */
			return _RESMGR_NOREPLY;
		}
	}
	if((prp = proc_lock_pid(pid))) {
		// msg will be NULL if we're called from wait_close_ocb()
		if(msg && ((MsgInfo(ctp->rcvid, &info) == -1) || !(info.flags & _NTO_MI_UNBLOCK_REQ))) {
			/* no unblock pending for this rcvid: nothing to do */
			proc_unlock(prp);
			return _RESMGR_NOREPLY;
		}
		if((prp->flags & (_NTO_PF_LOADING | _NTO_PF_TERMING)) == 0) {
			struct wait_entry *p, **pp;

			for(pp = &prp->wap; (p = *pp); pp = &p->next) {
				if(p->rcvid == ctp->rcvid) {
					/* unlink the entry and wake the waiter with EINTR */
					*pp = p->next;
					if(prp->wap == NULL) {
						(void)_resmgr_unbind(&ctp->info);
					}
					MsgError(ctp->rcvid, EINTR);
					proc_object_free(&wait_souls, p);
					break;
				}
			}
		}
		proc_unlock(prp);
	}
	return _RESMGR_NOREPLY;
}
/*
 * proc_vfork_end
 *
 * Description:	stop a vfork on a process
 *
 * Parameters:	parent_proc		process leaving vfork state
 *
 * Returns:	(void)
 *
 * Notes:	Decrements the count; currently, reentrancy of vfork()
 *		is unsupported on the current process
 */
void
proc_vfork_end(proc_t parent_proc)
{
	proc_lock(parent_proc);
	/* drop one level of vfork nesting; going negative is fatal */
	if (--parent_proc->p_vforkcnt < 0)
		panic("vfork cnt is -ve");
	/* last vfork finished: the parent is no longer in vfork state */
	if (parent_proc->p_vforkcnt == 0)
		parent_proc->p_lflag &= ~P_LVFORK;
	proc_unlock(parent_proc);
}
/*
 * Claim the right to initialize the process workqueue pointer, or wait
 * for the thread that is already doing so.
 *
 * Returns TRUE when the caller has claimed initialization (p_wqptr is
 * set to the WQPTR_IS_INITING_VALUE sentinel); the caller must then
 * complete it via proc_set_wqptr().  Returns FALSE when another thread
 * is initializing (after blocking until it finishes) or when the
 * pointer is already set.
 */
static boolean_t proc_init_wqptr_or_wait(struct proc *p) {
	proc_lock(p);

	if (p->p_wqptr == NULL){
		/* first one in: claim initialization with the sentinel */
		p->p_wqptr = WQPTR_IS_INITING_VALUE;
		proc_unlock(p);
		return TRUE;
	} else if (p->p_wqptr == WQPTR_IS_INITING_VALUE){
		/* someone else is initializing; sleep until they wake us */
		assert_wait(&p->p_wqptr, THREAD_UNINT);
		proc_unlock(p);
		thread_block(THREAD_CONTINUE_NULL);
		return FALSE;
	} else {
		/* already initialized */
		proc_unlock(p);
		return FALSE;
	}
}
/*
 * proc_vfork_end
 *
 * Description:	stop a vfork on a process
 *
 * Parameters:	parent_proc		process leaving vfork state
 *
 * Returns:	(void)
 *
 * Notes:	Decrements the count; currently, reentrancy of vfork()
 *		is unsupported on the current process
 */
void
proc_vfork_end(proc_t parent_proc)
{
	proc_lock(parent_proc);
	parent_proc->p_vforkcnt--;
	if (parent_proc->p_vforkcnt < 0)
		panic("vfork cnt is -ve");
	/* reduce the vfork count; clear the flag when it goes to 0 */
	if (parent_proc->p_vforkcnt == 0)
		parent_proc->p_lflag &= ~P_LVFORK;
	proc_unlock(parent_proc);
}
/*
 * sd_callback3 -- proc_iterate() callback for the final phase of
 * shutdown: forcibly terminate processes that ignored earlier signals.
 *
 * Marks the proc with the new shutdown state; if it is not yet a
 * zombie and no other thread is already exiting it, takes ownership of
 * the exit (p->exit_thread) and calls exit1() on its behalf.  If an
 * exit is already in progress, yields to give that thread a chance.
 */
static int
sd_callback3(proc_t p, void * args)
{
	struct sd_iterargs * sd = (struct sd_iterargs *)args;
	vfs_context_t ctx = vfs_context_current();

	int setsdstate = sd->setsdstate;

	proc_lock(p);
	p->p_shutdownstate = setsdstate;
	if (p->p_stat != SZOMB) {
		/*
		 * NOTE: following code ignores sig_lock and plays
		 * with exit_thread correctly.  This is OK unless we
		 * are a multiprocessor, in which case I do not
		 * understand the sig_lock.  This needs to be fixed.
		 * XXX
		 */
		if (p->exit_thread) {	/* someone already doing it */
			proc_unlock(p);
			/* give him a chance */
			thread_block(THREAD_CONTINUE_NULL);
		} else {
			/* claim the exit and force-terminate the proc */
			p->exit_thread = current_thread();
			printf(".");

			sd_log(ctx, "%s[%d] had to be forced closed with exit1().\n", p->p_comm, p->p_pid);
			proc_unlock(p);
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
					      p->p_pid, 0, 1, 0, 0);
			sd->activecount++;
			exit1(p, 1, (int *)NULL);
		}
	} else {
		proc_unlock(p);
	}

	return PROC_RETURNED;
}
/* Not called from probe context */
/*
 * Release a process previously locked with sprlock(): drop the dtrace
 * sprlock and the proc lock, resume the (previously suspended) task,
 * and release the proc reference taken at lock time.
 */
void
sprunlock(proc_t *p)
{
	if (p != PROC_NULL) {
		lck_mtx_unlock(&p->p_dtrace_sprlock);
		proc_unlock(p);

		task_resume(p->task);

		proc_rele(p);
	}
}
/* make a thread-local copy of polling data */
/* Snapshot proc->polling into proc->thr_polling under the processor
 * lock so the polling thread can use the tables without contention. */
static inline void proc_cache_polling (zebra_processor_t *proc)
{
    (void)proc_lock(proc);

    int nfds = proc->polling.num;
    zprintf(5, "%d fds\n", nfds);

    proc->thr_polling.num = nfds;
    alloc_polls(&proc->thr_polling);
    memcpy(proc->thr_polling.fds, proc->polling.fds,
           nfds * sizeof(struct pollfd));
    memcpy(proc->thr_polling.handlers, proc->polling.handlers,
           nfds * sizeof(poll_handler_t*));

    proc_unlock(proc);
}
/* Install a new decode-result callback together with its opaque
 * userdata, swapping both in under the processor lock.
 * Returns the previously installed handler, or NULL if the lock
 * cannot be acquired. */
zebra_image_data_handler_t*
zebra_processor_set_data_handler (zebra_processor_t *proc,
                                  zebra_image_data_handler_t *handler,
                                  const void *userdata)
{
    if(proc_lock(proc) < 0)
        return(NULL);

    zebra_image_data_handler_t *prev = proc->handler;
    proc->handler = handler;
    proc->userdata = userdata;

    proc_unlock(proc);
    return(prev);
}
/* Not called from probe context */
/*
 * Release a process previously locked with sprlock(): drop the proc
 * lock and the dtrace sprlock, resume the suspended task, and release
 * the proc reference taken at lock time.
 */
void
sprunlock(proc_t *p)
{
	if (p != PROC_NULL) {
		proc_unlock(p);

		dtrace_sprunlock(p);

		task_resume_internal(p->task);

		proc_rele(p);
	}
}
/*
 * Publish the process workqueue pointer and wake any threads blocked
 * in proc_init_wqptr_or_wait() while it was being initialized.
 *
 * A non-NULL value may only replace the WQPTR_IS_INITING_VALUE
 * sentinel (i.e. the caller previously claimed initialization); NULL
 * may be stored unconditionally to tear the pointer down.
 */
static void proc_set_wqptr(struct proc *p, void *y) {
	proc_lock(p);

	assert(y == NULL || p->p_wqptr == WQPTR_IS_INITING_VALUE);
	p->p_wqptr = y;
	if (y != NULL){
		/* wake waiters sleeping on &p->p_wqptr */
		wakeup(&p->p_wqptr);
	}

	proc_unlock(p);
}
/*
 * Wait for the given processor event(s) without helper threads.
 *
 * Since there is no input thread, this loop polls for window input
 * inline and (when the video interface has no pollable fd) pulls and
 * processes video frames synchronously.  The caller's timeout is
 * honored via the absolute deadline in abstime.
 *
 * Returns 1 when the event fired, 0 on timeout and -1 on error.
 */
static inline int proc_event_wait_unthreaded (zebra_processor_t *proc,
                                              unsigned event,
                                              int timeout,
                                              struct timespec *abstime)
{
    /* "blocking" video: no fd to poll, frames must be fetched inline */
    int blocking = proc->active && zebra_video_get_fd(proc->video) < 0;
    proc->events &= ~event;
    /* unthreaded, poll here for window input */
    while(!(proc->events & event)) {
        if(blocking) {
            zebra_image_t *img = zebra_video_next_image(proc->video);
            if(!img)
                return(-1);
            process_image(proc, img);
            zebra_image_destroy(img);
        }
        int reltime = timeout;
        if(reltime >= 0) {
            /* convert the absolute deadline to remaining milliseconds */
            struct timespec now;
#if _POSIX_TIMERS > 0
            clock_gettime(CLOCK_REALTIME, &now);
#else
            struct timeval ustime;
            gettimeofday(&ustime, NULL);
            now.tv_nsec = ustime.tv_usec * 1000;
            now.tv_sec = ustime.tv_sec;
#endif
            reltime = ((abstime->tv_sec - now.tv_sec) * 1000 +
                       (abstime->tv_nsec - now.tv_nsec) / 1000000);
            if(reltime <= 0)
                return(0);
        }
        /* cap the wait so inline video frames keep flowing */
        if(blocking && (reltime < 0 || reltime > 10))
            reltime = 10;
        if(proc->polling.num)
            proc_poll_inputs(proc, (poll_desc_t*)&proc->polling, reltime);
        else if(!blocking) {
            /* nothing to poll: sleep out the timeout with the lock
             * dropped.  NOTE(review): sleeps for the full original
             * timeout rather than the remaining reltime -- confirm
             * this is intended */
            proc_unlock(proc);
            struct timespec sleepns, remns;
            sleepns.tv_sec = timeout / 1000;
            sleepns.tv_nsec = (timeout % 1000) * 1000000;
            while(nanosleep(&sleepns, &remns) && errno == EINTR)
                sleepns = remns;
            (void)proc_lock(proc);
            return(0);
        }
    }
    return(1);
}
/*
 * Wait for user input (keypress etc.) on the display window.
 *
 * Returns the input code (> 0), 0 on timeout, or -1 on error.  If the
 * window is not visible after the wait, the result is replaced by a
 * ZEBRA_ERR_CLOSED warning.
 *
 * NOTE(review): proc->visible is re-read after proc_unlock(), i.e.
 * outside the lock -- confirm this race is acceptable here.
 */
int zebra_processor_user_wait (zebra_processor_t *proc,
                               int timeout)
{
    if(proc_lock(proc) < 0)
        return(-1);
    int rc = -1;
    if(proc->visible || proc->active || timeout > 0)
        rc = proc_event_wait(proc, EVENT_INPUT, timeout);
    if(rc > 0)
        rc = proc->input;
    proc_unlock(proc);
    if(!proc->visible)
        return(err_capture(proc, SEV_WARNING, ZEBRA_ERR_CLOSED, __func__,
                           "display window not available for input"));
    return(rc);
}
/*
 * Tear down a processor: stop the worker threads, destroy the window,
 * video and scanner objects, release the synchronization primitives
 * and polling tables, and finally free the processor itself.
 */
void zebra_processor_destroy (zebra_processor_t *proc)
{
    (void)proc_lock(proc);

    /* stop the helper threads before touching shared state */
    proc_destroy_thread(proc->video_thread, &proc->video_started);
    proc_destroy_thread(proc->input_thread, &proc->input_started);

    if(proc->window) {
        zebra_window_destroy(proc->window);
        proc->window = NULL;
        _zebra_window_close(proc);
    }
    if(proc->video) {
        zebra_video_destroy(proc->video);
        proc->video = NULL;
    }
    if(proc->scanner) {
        zebra_image_scanner_destroy(proc->scanner);
        proc->scanner = NULL;
    }
    proc_unlock(proc);
    err_cleanup(&proc->err);
#ifdef HAVE_LIBPTHREAD
    pthread_cond_destroy(&proc->event);
    pthread_cond_destroy(&proc->cond);
    pthread_mutex_destroy(&proc->mutex);
#endif

    /* release both the shared and the thread-local polling tables */
    if(proc->polling.fds) {
        free(proc->polling.fds);
        proc->polling.fds = NULL;
    }
    if(proc->polling.handlers) {
        free(proc->polling.handlers);
        proc->polling.handlers = NULL;
    }
    if(proc->thr_polling.fds) {
        free(proc->thr_polling.fds);
        proc->thr_polling.fds = NULL;
    }
    if(proc->thr_polling.handlers) {
        free(proc->thr_polling.handlers);
        proc->thr_polling.handlers = NULL;
    }
    free(proc);
}
/* Scan a single image (one-shot, result cache disabled).
 * When a display window exists, it is first resized to fit the image.
 * Returns the scan result, or -1 if the lock cannot be acquired. */
int zebra_process_image (zebra_processor_t *proc,
                         zebra_image_t *img)
{
    if(proc_lock(proc) < 0)
        return(-1);

    int result = 0;
    if(img && proc->window)
        result = _zebra_window_resize(proc,
                                      zebra_image_get_width(img),
                                      zebra_image_get_height(img));
    if(!result) {
        /* one-shot scan: disable the inter-frame result cache */
        zebra_image_scanner_enable_cache(proc->scanner, 0);
        result = process_image(proc, img);
        /* restore caching when a video stream is still active */
        if(proc->active)
            zebra_image_scanner_enable_cache(proc->scanner, 1);
    }

    proc_unlock(proc);
    return(result);
}
/* Show or hide the processor's display window.
 * The window is resized to match the video source (when one exists)
 * before changing visibility.  Hiding with no window is a no-op;
 * showing with no window is an error. */
int zebra_processor_set_visible (zebra_processor_t *proc,
                                 int visible)
{
    if(proc_lock(proc) < 0)
        return(-1);

    int rc = 0;
    if(!proc->window) {
        /* nothing to show: only an error when visibility was requested */
        if(visible)
            rc = err_capture(proc, SEV_ERROR, ZEBRA_ERR_INVALID, __func__,
                             "processor display window not initialized");
    }
    else {
        if(proc->video)
            rc = _zebra_window_resize(proc,
                                      zebra_video_get_width(proc->video),
                                      zebra_video_get_height(proc->video));
        if(!rc)
            rc = _zebra_window_set_visible(proc, visible);
    }

    proc_unlock(proc);
    return(rc);
}
/*
 * Deliver exit notification for a process whose parent will not reap
 * a zombie (_NTO_PF_NOZOMBIE semantics).
 *
 * Fills in the SIGCHLD siginfo for the exiting process, satisfies any
 * waiters queued on the parent, marks the process no-zombie, and
 * finally signals the parent with SIGCHLD.
 */
void procmgr_nozombie(PROCESS *prp, int status) {
	/*
	 * This should use CLD_EXITED instead of SI_USER, but the SignalKill()
	 * kernel call does not allow it. SI_USER is allowable though, and this
	 * is only used by QNX functions right now.
	 */
	if(!(prp->flags & _NTO_PF_NOZOMBIE)) {
		PROCESS *parent;
		struct wait_entry *wap, **pwap;

		/* stage the SIGCHLD exit information */
		prp->siginfo.si_signo = SIGCHLD;
		prp->siginfo.si_code = CLD_EXITED;
		prp->siginfo.si_pid = prp->pid;
		prp->siginfo.si_status = status;
		parent = proc_lock_parent(prp);
		/* keep the wait info unless the parent ignores SIGCHLD */
		if(!sigismember(&(prp->parent->sig_ignore), SIGCHLD)) {
			prp->flags |= _NTO_PF_WAITINFO;
		}
		for(pwap = &parent->wap; (wap = *pwap);) {
			if((status = procmgr_wait_check(prp, parent, wap, WEXITED)) > 0) {
				/* waiter satisfied: unlink and free its entry */
				*pwap = wap->next;
				proc_object_free(&wait_souls, wap);
				if(status == WEXITED) {
					break;
				}
			} else {
				pwap = &wap->next;
			}
		}
		prp->flags |= _NTO_PF_NOZOMBIE;
		proc_unlock(parent);
		SignalKill(ND_LOCAL_NODE, prp->parent->pid, 0, SIGCHLD, SI_USER, prp->pid);
	}
}
/*
 * vfork_return
 *
 * Description:	"Return" to parent vfork thread() following execve/_exit;
 *		this is done by reassociating the parent process structure
 *		with the task, thread, and uthread.
 *
 *		Refer to the ASCII art above vfork() to figure out the
 *		state we're undoing.
 *
 * Parameters:	child_proc		Child process
 *		retval			System call return value array
 *		rval			Return value to present to parent
 *
 * Returns:	void
 *
 * Notes:	The caller resumes or exits the parent, as appropriate, after
 *		calling this function.
 */
void
vfork_return(proc_t child_proc, int32_t *retval, int rval)
{
	task_t parent_task = get_threadtask(child_proc->p_vforkact);
	proc_t parent_proc = get_bsdtask_info(parent_task);
	thread_t th = current_thread();
	uthread_t uth = get_bsdthread_info(th);

	act_thread_catt(uth->uu_userstate);

	/* clear vfork state in parent proc structure */
	proc_vfork_end(parent_proc);

	/* REPATRIATE PARENT TASK, THREAD, UTHREAD */
	uth->uu_userstate = 0;
	uth->uu_flag &= ~UT_VFORK;
	/* restore thread-set-id state */
	if (uth->uu_flag & UT_WASSETUID) {
		uth->uu_flag |= UT_SETUID;
		/*
		 * Clear only the WASSETUID bit.  The previous code masked
		 * with UT_WASSETUID (missing the ~), which cleared every
		 * other flag -- including the UT_SETUID bit set just above.
		 */
		uth->uu_flag &= ~UT_WASSETUID;
	}
	uth->uu_proc = 0;
	uth->uu_sigmask = uth->uu_vforkmask;

	proc_lock(child_proc);
	child_proc->p_lflag &= ~P_LINVFORK;
	child_proc->p_vforkact = 0;
	proc_unlock(child_proc);

	thread_set_parent(th, rval);

	if (retval) {
		retval[0] = rval;
		retval[1] = 0;		/* mark parent */
	}
}
/*
 * ptrace() system call.
 *
 * Dispatches on uap->req: self-directed requests (PT_DENY_ATTACH,
 * PT_FORCEQUOTA, PT_TRACE_ME, PT_SIGEXC) act on the calling process p;
 * all other requests look up the target uap->pid via proc_find()
 * (which takes a reference released at "out:") and require that the
 * target is traced by the caller and currently stopped.
 *
 * Returns 0 on success or an errno value.
 */
int
ptrace(struct proc *p, struct ptrace_args *uap, register_t *retval)
{
	struct proc *t = current_proc();	/* target process */
	task_t		task;
	thread_t	th_act;
	struct uthread	*ut;
	int tr_sigexc = 0;
	int error = 0;
	int stopped = 0;

	AUDIT_ARG(cmd, uap->req);
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(value, uap->data);

	if (uap->req == PT_DENY_ATTACH) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			/* already traced: deny-attach is fatal to the caller */
			proc_unlock(p);
			exit1(p, W_EXITCODE(ENOTSUP, 0), retval);
			/* drop funnel before we return */
			thread_exception_return();
			/* NOTREACHED */
		}
		SET(p->p_lflag, P_LNOATTACH);
		proc_unlock(p);

		return(0);
	}

	if (uap->req == PT_FORCEQUOTA) {
		if (is_suser()) {
			OSBitOrAtomic(P_FORCEQUOTA, (UInt32 *)&t->p_flag);
			return (0);
		} else
			return (EPERM);
	}

	/*
	 *	Intercept and deal with "please trace me" request.
	 */
	if (uap->req == PT_TRACE_ME) {
		proc_lock(p);
		SET(p->p_lflag, P_LTRACED);
		/* Non-attached case, our tracer is our parent. */
		p->p_oppid = p->p_ppid;
		proc_unlock(p);
		return(0);
	}
	if (uap->req == PT_SIGEXC) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			SET(p->p_lflag, P_LSIGEXC);
			proc_unlock(p);
			return(0);
		} else {
			proc_unlock(p);
			return(EINVAL);
		}
	}

	/*
	 * We do not want ptrace to do anything with kernel or launchd
	 */
	if (uap->pid < 2) {
		return(EPERM);
	}

	/*
	 *	Locate victim, and make sure it is traceable.
	 */
	if ((t = proc_find(uap->pid)) == NULL)
		return (ESRCH);

	AUDIT_ARG(process, t);

	task = t->task;
	if (uap->req == PT_ATTACHEXC) {
		/* PT_ATTACHEXC is PT_ATTACH plus Mach-exception delivery */
		uap->req = PT_ATTACH;
		tr_sigexc = 1;
	}
	if (uap->req == PT_ATTACH) {
		int		err;

		if ( kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE,
					     t, (uintptr_t)&err, 0, 0) == 0 ) {
			/* it's OK to attach */
			proc_lock(t);
			SET(t->p_lflag, P_LTRACED);
			if (tr_sigexc)
				SET(t->p_lflag, P_LSIGEXC);

			t->p_oppid = t->p_ppid;
			proc_unlock(t);
			if (t->p_pptr != p)
				proc_reparentlocked(t, p, 1, 0);

			proc_lock(t);
			if (get_task_userstop(task) > 0 ) {
				stopped = 1;
			}
			t->p_xstat = 0;
			proc_unlock(t);
			psignal(t, SIGSTOP);
			/*
			 * If the process was stopped, wake up and run through
			 * issignal() again to properly connect to the tracing
			 * process.
			 */
			if (stopped)
				task_resume(task);
			error = 0;
			goto out;
		}
		else {
			/* not allowed to attach, proper error code returned by kauth_authorize_process */
			if (ISSET(t->p_lflag, P_LNOATTACH)) {
				psignal(p, SIGSEGV);
			}

			error = err;
			goto out;
		}
	}

	/*
	 * You can't do what you want to the process if:
	 *	(1) It's not being traced at all,
	 */
	proc_lock(t);
	if (!ISSET(t->p_lflag, P_LTRACED)) {
		proc_unlock(t);
		error = EPERM;
		goto out;
	}

	/*
	 *	(2) it's not being traced by _you_, or
	 */
	if (t->p_pptr != p) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	(3) it's not currently stopped.
	 */
	if (t->p_stat != SSTOP) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	Mach version of ptrace executes request directly here,
	 *	thus simplifying the interaction of ptrace and signals.
	 */
	/* proc lock is held here */
	switch (uap->req) {

	case PT_DETACH:
		if (t->p_oppid != t->p_ppid) {
			struct proc *pp;

			/* hand the target back to its original parent */
			proc_unlock(t);
			pp = proc_find(t->p_oppid);
			proc_reparentlocked(t, pp ? pp : initproc, 1, 0);
			if (pp != PROC_NULL)
				proc_rele(pp);
			proc_lock(t);
		}

		t->p_oppid = 0;
		CLR(t->p_lflag, P_LTRACED);
		CLR(t->p_lflag, P_LSIGEXC);
		proc_unlock(t);
		goto resume;

	case PT_KILL:
		/*
		 *	Tell child process to kill itself after it
		 *	is resumed by adding NSIG to p_cursig. [see issig]
		 */
		proc_unlock(t);
		psignal(t, SIGKILL);
		goto resume;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		proc_unlock(t);
		th_act = (thread_t)get_firstthread(task);
		if (th_act == THREAD_NULL) {
			error = EINVAL;
			goto out;
		}

		if (uap->addr != (user_addr_t)1) {
#if defined(ppc)
#define ALIGNED(addr,size)	(((unsigned)(addr)&((size)-1))==0)
			if (!ALIGNED((int)uap->addr, sizeof(int)))
				/* NOTE(review): returns without proc_rele(t)
				 * -- leaks the reference from proc_find() */
				return (ERESTART);
#undef	ALIGNED
#endif
			thread_setentrypoint(th_act, uap->addr);
		}

		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}

		if (uap->data != 0) {
			psignal(t, uap->data);
		}

		if (uap->req == PT_STEP) {
			/*
			 * set trace bit
			 */
			if (thread_setsinglestep(th_act, 1) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		} else {
			/*
			 * clear trace bit if on
			 */
			if (thread_setsinglestep(th_act, 0) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		}
	resume:
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		if (t->sigwait) {
			wakeup((caddr_t)&(t->sigwait));
			proc_unlock(t);
			if ((t->p_lflag & P_LSIGEXC) == 0) {
				task_resume(task);
			}
		} else
			proc_unlock(t);

		break;

	case PT_THUPDATE:  {
		proc_unlock(t);
		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}
		th_act = port_name_to_thread(CAST_DOWN(mach_port_name_t, uap->addr));
		if (th_act == THREAD_NULL)
			/* NOTE(review): returns without proc_rele(t) --
			 * leaks the reference from proc_find() */
			return (ESRCH);
		ut = (uthread_t)get_bsdthread_info(th_act);
		if (uap->data)
			ut->uu_siglist |= sigmask(uap->data);
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		proc_unlock(t);
		thread_deallocate(th_act);
		error = 0;
		}
		break;
	default:
		proc_unlock(t);
		error = EINVAL;
		goto out;
	}

	error = 0;
out:
	proc_rele(t);
	return(error);
}
/*
 * This routine frees all the BSD context in uthread except the credential.
 * It does not free the uthread structure as well
 *
 * Releases throttle state, select bit space, the cached current
 * directory, the wait-queue set, and the thread name; unlinks the
 * uthread from the owning process's thread list, transferring any
 * relevant pending signals to the process.
 */
void
uthread_cleanup(task_t task, void *uthread, void * bsd_info)
{
	struct _select *sel;
	uthread_t uth = (uthread_t)uthread;
	proc_t p = (proc_t)bsd_info;

	if (uth->uu_lowpri_window || uth->uu_throttle_info) {
		/*
		 * task is marked as a low priority I/O type
		 * and we've somehow managed to not dismiss the throttle
		 * through the normal exit paths back to user space...
		 * no need to throttle this thread since its going away
		 * but we do need to update our bookeeping w/r to throttled threads
		 *
		 * Calling this routine will clean up any throttle info reference
		 * still inuse by the thread.
		 */
		throttle_lowpri_io(FALSE);
	}
	/*
	 * Per-thread audit state should never last beyond system
	 * call return.  Since we don't audit the thread creation/
	 * removal, the thread state pointer should never be
	 * non-NULL when we get here.
	 */
	assert(uth->uu_ar == NULL);

	sel = &uth->uu_select;
	/* cleanup the select bit space */
	if (sel->nbytes) {
		FREE(sel->ibits, M_TEMP);
		FREE(sel->obits, M_TEMP);
		sel->nbytes = 0;
	}

	if (uth->uu_cdir) {
		vnode_rele(uth->uu_cdir);
		uth->uu_cdir = NULLVP;
	}

	if (uth->uu_allocsize && uth->uu_wqset){
		kfree(uth->uu_wqset, uth->uu_allocsize);
		sel->count = 0;
		uth->uu_allocsize = 0;
		uth->uu_wqset = 0;
		sel->wql = 0;
	}

	if(uth->pth_name != NULL)
	{
		kfree(uth->pth_name, MAXTHREADNAMESIZE);
		uth->pth_name = 0;
	}
	if ((task != kernel_task) && p) {

		/* an undone vfork child must be exited on its behalf */
		if (((uth->uu_flag & UT_VFORK) == UT_VFORK) && (uth->uu_proc != PROC_NULL)) {
			vfork_exit_internal(uth->uu_proc, 0, 1);
		}
		/*
		 * Remove the thread from the process list and
		 * transfer [appropriate] pending signals to the process.
		 */
		if (get_bsdtask_info(task) == p) {
			proc_lock(p);
			TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
			p->p_siglist |= (uth->uu_siglist & execmask & (~p->p_sigignore | sigcantmask));
			proc_unlock(p);
		}
#if CONFIG_DTRACE
		struct dtrace_ptss_page_entry *tmpptr = uth->t_dtrace_scratch;
		uth->t_dtrace_scratch = NULL;
		if (tmpptr != NULL) {
			dtrace_ptss_release_entry(p, tmpptr);
		}
#endif
	}
}
/*
 * Allocate and initialize a new uthread for the given task/thread.
 *
 * The new thread inherits its credential and signal mask from the
 * creating thread when both belong to the same task (unless noinherit
 * is set, as for workqueue threads); otherwise the credential is taken
 * from the owning process, or left NOCRED for kernel threads.  The
 * uthread is linked onto the owning process's thread list.
 *
 * Returns the new uthread as an opaque pointer.
 */
void *
uthread_alloc(task_t task, thread_t thread, int noinherit)
{
	proc_t p;
	uthread_t uth;
	uthread_t uth_parent;
	void *ut;

	if (!uthread_zone_inited)
		uthread_zone_init();

	ut = (void *)zalloc(uthread_zone);
	bzero(ut, sizeof(struct uthread));

	p = (proc_t) get_bsdtask_info(task);
	uth = (uthread_t)ut;

	/*
	 * Thread inherits credential from the creating thread, if both
	 * are in the same task.
	 *
	 * If the creating thread has no credential or is from another
	 * task we can leave the new thread credential NULL. If it needs
	 * one later, it will be lazily assigned from the task's process.
	 */
	uth_parent = (uthread_t)get_bsdthread_info(current_thread());
	if ((noinherit == 0) && task == current_task() &&
	    uth_parent != NULL &&
	    IS_VALID_CRED(uth_parent->uu_ucred)) {
		/*
		 * XXX The new thread is, in theory, being created in context
		 * XXX of parent thread, so a direct reference to the parent
		 * XXX is OK.
		 */
		kauth_cred_ref(uth_parent->uu_ucred);
		uth->uu_ucred = uth_parent->uu_ucred;
		/* the credential we just inherited is an assumed credential */
		if (uth_parent->uu_flag & UT_SETUID)
			uth->uu_flag |= UT_SETUID;
	} else {
		/* sometimes workqueue threads are created out task context */
		if ((task != kernel_task) && (p != PROC_NULL))
			uth->uu_ucred = kauth_cred_proc_ref(p);
		else
			uth->uu_ucred = NOCRED;
	}

	if ((task != kernel_task) && p) {

		proc_lock(p);
		if (noinherit != 0) {
			/* workq threads will not inherit masks */
			uth->uu_sigmask = ~workq_threadmask;
		} else if (uth_parent) {
			/* prefer the pre-sigaltstack mask when one is saved */
			if (uth_parent->uu_flag & UT_SAS_OLDMASK)
				uth->uu_sigmask = uth_parent->uu_oldmask;
			else
				uth->uu_sigmask = uth_parent->uu_sigmask;
		}
		uth->uu_context.vc_thread = thread;
		TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
		proc_unlock(p);

#if CONFIG_DTRACE
		if (p->p_dtrace_ptss_pages != NULL) {
			uth->t_dtrace_scratch = dtrace_ptss_claim_entry(p);
		}
#endif
	}

	return (ut);
}
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands.  If "map"==VM_MAP_NULL or
 * "thread"==THREAD_NULL, do not make permament VM modifications,
 * just preflight the parse.
 *
 * NOTE(review): this appears to be a qemu/user-space port of the XNU
 * loader -- vp is a raw byte pointer (not a vnode), and the
 * LC_ENCRYPTION_INFO path references 'map', 'p' and 'vp->v_name' which
 * are not declared in this function; presumably file-scope globals in
 * this port -- confirm.
 */
static
load_return_t
parse_machfile(
	uint8_t			*vp,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	int			depth,
	int64_t			aslr_offset,
	int64_t			dyld_aslr_offset,
	load_result_t		*result
)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	integer_t		dlarchbits = 0;
	void *			control;
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	vm_size_t		size,kl_size;
	size_t			offset;
	size_t			oldoffset;	/* for overflow check */
	int			pass;
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;
	boolean_t		got_code_signatures = FALSE;
	int64_t			slide = 0;

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
		mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 *	Break infinite recursion
	 */
	if (depth > 6) {
		printf("parse_machfile 1: %s\n", load_to_string(LOAD_FAILURE));
		return(LOAD_FAILURE);
	}

	depth++;

	/*
	 *	Check to see if right machine type.
	 */
	// this should be implemented by qemu somehow.
	/*if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) ||
	    !grade_binary(header->cputype,
		header->cpusubtype & ~CPU_SUBTYPE_MASK))
		return(LOAD_BADARCH);*/

	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

	/* only certain file types are legal at each recursion depth */
	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1) {
			printf("parse_machfile 2: %s\n", load_to_string(LOAD_FAILURE));
			return (LOAD_FAILURE);
		}
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1) {
			printf("parse_machfile 2: %s\n", load_to_string(LOAD_FAILURE));
			return (LOAD_FAILURE);
		}
		break;

	case MH_DYLINKER:
		if (depth != 2) {
			printf("parse_machfile 3: %s\n", load_to_string(LOAD_FAILURE));
			return (LOAD_FAILURE);
		}
		break;

	default:
		printf("parse_machfile 4: %s, header->filetype = %d\n", load_to_string(LOAD_FAILURE), header->filetype);
		return (LOAD_FAILURE);
	}

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	/* NOTE(review): macho_size is off_t and sizeofcmds is uint32_t,
	 * but both are printed with %d -- format/argument mismatch */
	if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size) {
		printf("parse_machfile 5: %s header->sizeofcmds: %d macho_size %d\n", load_to_string(LOAD_BADMACHO), header->sizeofcmds, macho_size);
		return(LOAD_BADMACHO);
	}

	/*
	 *	Round size of Mach-O commands up to page boundry.
	 */
	size = round_page(mach_header_sz + header->sizeofcmds);
	if (size <= 0) {
		printf("parse_machfile 6: %s\n", load_to_string(LOAD_BADMACHO));
		return(LOAD_BADMACHO);
	}

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	/* in this port the whole image is already in memory at vp */
	addr = (caddr_t)(vp);
	if (addr == NULL) {
		printf("parse_machfile 7: %s\n", load_to_string(LOAD_NOSPACE));
		return(LOAD_NOSPACE);
	}

	/*
	 *	For PIE and dyld, slide everything by the ASLR offset.
	 */
	if ((header->flags & MH_PIE) || (header->filetype == MH_DYLINKER)) {
		slide = aslr_offset;
	}

	/*
	 *	Scan through the commands, processing each one as necessary.
	 *	We parse in three passes through the headers:
	 *	1: thread state, uuid, code signature
	 *	2: segments
	 *	3: dyld, encryption, check entry point
	 */

	for (pass = 1; pass <= 3; pass++) {

		/*
		 * Check that the entry point is contained in an executable segments
		 */
		if ((pass == 3) && (result->validentry == 0)) {
			ret = LOAD_FAILURE;
			break;
		}

		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;

		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;
			offset += lcp->cmdsize;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents.  Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (oldoffset > offset ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > header->sizeofcmds + mach_header_sz) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 */
			printf("Command: %s\n", command_to_string(lcp->cmd));
			switch(lcp->cmd) {
			case LC_SEGMENT:
				if (pass != 2)
					break;

				if (abi64) {
					/*
					 * Having an LC_SEGMENT command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
						   header->filetype,
						   control,
						   file_offset,
						   macho_size,
						   vp,
						   slide,
						   result);
				break;
			case LC_SEGMENT_64:
				if (pass != 2)
					break;

				if (!abi64) {
					/*
					 * Having an LC_SEGMENT_64 command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
						   header->filetype,
						   control,
						   file_offset,
						   macho_size,
						   vp,
						   slide,
						   result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 1)
					break;
				ret = load_unixthread(
						 (struct thread_command *) lcp,
						 result);
				break;
			case LC_MAIN:
				if (pass != 1)
					break;
				if (depth != 1)
					break;
				ret = load_main(
						 (struct entry_point_command *) lcp,
						 result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 3)
					break;
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
					dlarchbits = (header->cputype & CPU_ARCH_MASK);
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			case LC_UUID:
				if (pass == 1 && depth == 1) {
					ret = load_uuid((struct uuid_command *) lcp,
							(char *)addr + mach_header_sz + header->sizeofcmds,
							result);
				}
				break;
			case LC_CODE_SIGNATURE:
				/* CODE SIGNING */
				if (pass != 1)
					break;
				/* pager -> uip ->
				   load signatures & store in uip
				   set VM object "signed_pages"
				*/
				/* NOTE(review): the load_code_signature()
				 * call is commented out, so 'ret' here still
				 * holds the previous command's status --
				 * signatures are effectively never loaded */
				/*ret = load_code_signature(
					(struct linkedit_data_command *) lcp,
					vp,
					file_offset,
					macho_size,
					header->cputype,
					result);*/
				if (ret != LOAD_SUCCESS) {
					printf("proc: load code signature error %d ", ret);
					ret = LOAD_SUCCESS; /* ignore error */
				} else {
					got_code_signatures = TRUE;
				}
				break;
#if CONFIG_CODE_DECRYPTION
			case LC_ENCRYPTION_INFO:
			case LC_ENCRYPTION_INFO_64:
				if (pass != 3)
					break;
				ret = set_code_unprotect(
					(struct encryption_info_command *) lcp,
					addr, map, slide, vp,
					header->cputype, header->cpusubtype);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: set_code_unprotect() error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					/*
					 * Don't let the app run if it's
					 * encrypted but we failed to set up the
					 * decrypter. If the keys are missing it will
					 * return LOAD_DECRYPTFAIL.
					 */
					if (ret == LOAD_DECRYPTFAIL) {
						/* failed to load due to missing FP keys */
						proc_lock(p);
						p->p_lflag |= P_LTERM_DECRYPTFAIL;
						proc_unlock(p);
					}
					psignal(p, SIGKILL);
				}
				break;
#endif
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
			printf("parse_machfile 9: ret %s\n", load_to_string(ret));
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}

	if (ret == LOAD_SUCCESS) {
		if (! got_code_signatures) {
			//struct cs_blob *blob;
			/* no embedded signatures: look for detached ones */
			//blob = ubc_cs_blob_get(vp, -1, file_offset);
			//if (blob != NULL) {
				//unsigned int cs_flag_data = blob->csb_flags;
				//if(0 != ubc_cs_generation_check(vp)) {
				//	if (0 != ubc_cs_blob_revalidate(vp, blob)) {
				//		/* clear out the flag data if revalidation fails */
				//		cs_flag_data = 0;
				//		result->csflags &= ~CS_VALID;
				//	}
				//}
				/* get flags to be applied to the process */
				//result->csflags |= cs_flag_data;
			//}
		}

		/* Make sure if we need dyld, we got it */
		if (result->needs_dynlinker && !dlp) {
			ret = LOAD_FAILURE;
		}

		if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
			/*
			 * load the dylinker, and slide it by the independent DYLD ASLR
			 * offset regardless of the PIE-ness of the main binary.
			 */
			ret = load_dylinker(dlp, dlarchbits, depth,
					    dyld_aslr_offset, result);
		}

		if((ret == LOAD_SUCCESS) && (depth == 1)) {
			/* a depth-1 image must have provided thread state */
			if (result->thread_count == 0) {
				ret = LOAD_FAILURE;
			}
		}
	}

	printf("parse_machfile 8: %s\n", load_to_string(ret));

	return(ret);
}