/*
 *	Internal routine to terminate a thread.
 *	Sometimes called with task already locked.
 *
 *	Returns KERN_SUCCESS, or KERN_TERMINATED if the thread was
 *	already inactive (a previous termination won the race).
 */
kern_return_t
thread_terminate_internal(
	thread_t			thread)
{
	kern_return_t		result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		/* Mark inactive first so later callers see KERN_TERMINATED. */
		thread->active = FALSE;

		/* Abort any in-progress activity on the thread. */
		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			/*
			 * Never-started thread: start it now so it can run
			 * and process its own termination.
			 */
			thread_start_internal(thread);
		}
	}
	else
		result = KERN_TERMINATED;

	/* Sever any processor-affinity binding before the thread goes away. */
	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	/*
	 * Unless we are terminating ourself, wait for the victim to
	 * stop running before returning to the caller.
	 */
	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}
/*
 *	Mark a thread as started, letting it become runnable for the
 *	first time.
 *
 *	Always called with the thread mutex locked.
 */
void
thread_start(
	thread_t			thread)
{
	/* Release the thread from its creation wait, then record the start. */
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
}
/*
 *	Internal routine to mark a thread as started.
 *	Always called with the thread locked.
 *
 *	Kept out-of-line (see PR-6385749) so probe symbols are not
 *	multiply declared in this file; gcc has no "#pragma noinline".
 *	The lwp-start probe should fire from within the context of the
 *	newly created thread, so it is left disabled below in case it
 *	is turned into a dead code probe.
 */
void
thread_start_internal(
	thread_t			thread)
{
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
	// DTRACE_PROC1(lwp__start, thread_t, thread);
}
/*
 *	Decrement internal suspension count for thr_act, setting thread
 *	runnable when count falls to zero.
 *
 *	Called with act_lock held.
 */
void
thread_release(
	register thread_act_t	act)
{
	thread_t	thread = act->thread;

	/*
	 * NOTE: the && chain relies on short-circuit evaluation — the
	 * count is decremented only when it is positive, and the wakeup
	 * path is taken only when this release drops it to zero on the
	 * activation currently on top of the shuttle (top_act == act).
	 */
	if (	act->suspend_count > 0		&&
			--act->suspend_count == 0	&&
			thread != THREAD_NULL		&&
			thread->top_act == act		) {
		if (!act->started) {
			/* First release of a never-started activation: start it. */
			clear_wait(thread, THREAD_AWAKENED);
			act->started = TRUE;
		}
		else
			/* presumably wakes a suspender blocked on the count — confirm */
			thread_wakeup_one(&act->suspend_count);
	}
}
/*
 *	Abort the indicated thread's current activity, interrupting any
 *	wait it is blocked in.
 *
 *	Returns KERN_INVALID_ARGUMENT for a null thread and
 *	KERN_TERMINATED if the thread is no longer active.
 */
kern_return_t
thread_abort(
	thread_t		thread)
{
	kern_return_t	kr;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (!thread->active) {
		kr = KERN_TERMINATED;
	} else {
		kr = KERN_SUCCESS;
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	}

	thread_mtx_unlock(thread);

	return (kr);
}
/*
 *	Internal routine to terminate a thread.
 *	Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
	register thread_act_t	act)
{
	kern_return_t	result;
	thread_t		thread;

	thread = act_lock_thread(act);

	/* Already terminated by someone else: nothing more to do. */
	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/* Mark the activation inactive and abort its current activity. */
	act_disable(act);
	result = act_abort(act, FALSE);

	/*
	 * Make sure this thread enters the kernel
	 * Must unlock the act, but leave the shuttle
	 * captured in this act.
	 *
	 * The stop/unstop bounce forces the target through the kernel;
	 * if it cannot be stopped, report KERN_ABORTED instead.
	 */
	if (thread != current_thread()) {
		act_unlock(act);

		if (thread_stop(thread))
			thread_unstop(thread);
		else
			result = KERN_ABORTED;

		act_lock(act);
	}

	/*
	 * A started activation is interrupted out of its wait; a
	 * never-started one is simply awakened so it can run and die.
	 */
	clear_wait(thread, act->started? THREAD_INTERRUPTED: THREAD_AWAKENED);
	act_unlock_thread(act);

	return (result);
}
/*
 *	Abort the indicated activation's current activity, interrupting
 *	any wait the associated thread is blocked in.
 *
 *	Returns KERN_INVALID_ARGUMENT for a null activation and
 *	KERN_TERMINATED if the activation is no longer active;
 *	otherwise returns the result of the abort itself.
 */
kern_return_t
thread_abort(
	thread_act_t	act)
{
	thread_t		thread;
	kern_return_t	kr = KERN_TERMINATED;

	if (act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (act->active) {
		kr = act_abort(act, FALSE);
		clear_wait(thread, THREAD_INTERRUPTED);
	}

	act_unlock_thread(act);

	return (kr);
}
/*
 *	Undo one user-level suspension of the activation, making the
 *	thread runnable again when both the user stop count and the
 *	suspend count fall to zero.
 *
 *	Returns KERN_INVALID_ARGUMENT for a null activation or a
 *	kernel-task thread, KERN_FAILURE if the thread was not
 *	suspended, and KERN_TERMINATED if it is no longer active.
 */
kern_return_t
thread_resume(
	register thread_act_t	act)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t		thread;

	if (act == THR_ACT_NULL || act->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (act->active) {
		if (act->user_stop_count > 0) {
			/*
			 * NOTE: relies on short-circuit evaluation — the
			 * suspend count is decremented only when the user
			 * stop count reaches zero, and the wakeup path runs
			 * only when both hit zero on the activation currently
			 * on top of the shuttle (top_act == act).
			 */
			if (	--act->user_stop_count == 0		&&
					--act->suspend_count == 0		&&
					thread != THREAD_NULL			&&
					thread->top_act == act			) {
				if (!act->started) {
					/* First resume of a never-started activation. */
					clear_wait(thread, THREAD_AWAKENED);
					act->started = TRUE;
				}
				else
					/* presumably wakes a suspender blocked on the count — confirm */
					thread_wakeup_one(&act->suspend_count);
			}
		}
		else
			result = KERN_FAILURE;
	}
	else
		result = KERN_TERMINATED;

	act_unlock_thread(act);

	return (result);
}
/*
 *	Routine:	semaphore_wait_internal
 *
 *		Decrements the semaphore count by one.  If the count is
 *		negative after the decrement, the calling thread blocks
 *		(possibly at a continuation and/or with a timeout).
 *
 *	Assumptions:
 *		A reference is held on the signal semaphore.
 */
kern_return_t
semaphore_wait_internal(
	semaphore_t		wait_semaphore,
	semaphore_t		signal_semaphore,
	mach_timespec_t	*wait_timep,
	void			(*caller_cont)(kern_return_t))
{
	boolean_t		nonblocking;
	int				wait_result;
	spl_t			spl_level;
	/* KERN_ALREADY_WAITING marks "we must block below" until overridden. */
	kern_return_t	kr = KERN_ALREADY_WAITING;

	spl_level = splsched();
	semaphore_lock(wait_semaphore);

	/*
	 * Decide if we really have to wait.
	 * A zero timeout means poll: never block.
	 */
	nonblocking = (wait_timep != (mach_timespec_t *)0) ?
				  (wait_timep->tv_sec == 0 && wait_timep->tv_nsec == 0) :
				  FALSE;

	if (!wait_semaphore->active) {
		kr = KERN_TERMINATED;
	} else if (wait_semaphore->count > 0) {
		/* Count available: take one and return without blocking. */
		wait_semaphore->count--;
		kr = KERN_SUCCESS;
	} else if (nonblocking) {
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		uint64_t	abstime;
		thread_t	self = current_thread();

		wait_semaphore->count = -1;	/* we don't keep an actual count */
		thread_lock(self);

		/*
		 * If it is a timed wait, calculate the wake up deadline.
		 */
		if (wait_timep != (mach_timespec_t *)0) {
			nanoseconds_to_absolutetime((uint64_t)wait_timep->tv_sec *
										NSEC_PER_SEC + wait_timep->tv_nsec, &abstime);
			clock_absolutetime_interval_to_deadline(abstime, &abstime);
		}
		else
			abstime = 0;	/* no deadline: wait indefinitely */

		/* Assert the wait now; the actual block happens further down. */
		(void)wait_queue_assert_wait64_locked(
					&wait_semaphore->wait_queue,
					SEMAPHORE_EVENT,
					THREAD_ABORTSAFE, abstime,
					self);
		thread_unlock(self);
	}
	semaphore_unlock(wait_semaphore);
	splx(spl_level);

	/*
	 * wait_semaphore is unlocked so we are free to go ahead and
	 * signal the signal_semaphore (if one was provided).
	 */
	if (signal_semaphore != SEMAPHORE_NULL) {
		kern_return_t signal_kr;

		/*
		 * lock the signal semaphore reference we got and signal it.
		 * This will NOT block (we cannot block after having asserted
		 * our intention to wait above).
		 */
		signal_kr = semaphore_signal_internal(signal_semaphore,
											  THREAD_NULL,
											  SEMAPHORE_SIGNAL_PREPOST);

		if (signal_kr == KERN_NOT_WAITING)
			signal_kr = KERN_SUCCESS;
		else if (signal_kr == KERN_TERMINATED) {
			/*
			 * Uh oh!  The semaphore we were to signal died.
			 * We have to get ourselves out of the wait in
			 * case we get stuck here forever (it is assumed
			 * that the semaphore we were posting is gating
			 * the decision by someone else to post the
			 * semaphore we are waiting on).  People will
			 * discover the other dead semaphore soon enough.
			 * If we got out of the wait cleanly (someone
			 * already posted a wakeup to us) then return that
			 * (most important) result.  Otherwise,
			 * return the KERN_TERMINATED status.
			 */
			thread_t self = current_thread();

			clear_wait(self, THREAD_INTERRUPTED);
			kr = semaphore_convert_wait_result(self->wait_result);
			if (kr == KERN_ABORTED)
				kr = KERN_TERMINATED;
		}
	}

	/*
	 * If we had an error, or we didn't really need to wait we can
	 * return now that we have signalled the signal semaphore.
	 */
	if (kr != KERN_ALREADY_WAITING)
		return kr;

	/*
	 * Now, we can block.  If the caller supplied a continuation
	 * pointer of his own for after the block, block with the
	 * appropriate semaphore continuation.  This will gather the
	 * semaphore results, release references on the semaphore(s),
	 * and then call the caller's continuation.
	 */
	if (caller_cont) {
		thread_t self = current_thread();

		/* Stash the state the continuation needs on the thread itself. */
		self->sth_continuation = caller_cont;
		self->sth_waitsemaphore = wait_semaphore;
		self->sth_signalsemaphore = signal_semaphore;
		wait_result = thread_block((thread_continue_t)semaphore_wait_continue);
	}
	else {
		wait_result = thread_block(THREAD_CONTINUE_NULL);
	}

	return (semaphore_convert_wait_result(wait_result));
}