/*
 *	thread_get_state:
 *
 *	Fetch the machine-dependent state of the target thread.
 *	Called with nothing locked; returns the same way.
 *	state is an OUT array; state_count is IN/OUT.
 */
kern_return_t
thread_get_state(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,			/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/*
			 * Another thread: hold it, drop the mutex, and wait for
			 * it to come to a stop so its register state is stable.
			 */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				/* The stop was aborted; reacquire the mutex and fail. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			/* Current thread: its state can be read directly. */
			result = machine_thread_get_state(
									thread, flavor, state, state_count);
	}
	else if (thread->inspection)
	{
		/* Inactive but open for inspection: still allow the read. */
		result = machine_thread_get_state(
								thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 * Change thread's machine-dependent state.  Called with nothing
 * locked.  Returns same way.
 *
 * When from_user is set, a successful change is counted in the
 * external-modification statistics for the thread.
 */
static kern_return_t
thread_set_state_internal(
	register thread_t		thread,
	int						flavor,
	thread_state_t			state,
	mach_msg_type_number_t	state_count,
	boolean_t				from_user)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/*
			 * Another thread: hold it, drop the mutex, and wait for
			 * it to come to a stop before rewriting its machine state.
			 */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
										thread, flavor, state, state_count);
				thread_unstop(thread);
			}
			else {
				/* The stop was aborted; reacquire the mutex and fail. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			/* Current thread: set our own state directly. */
			result = machine_thread_set_state(
									thread, flavor, state, state_count);
	}
	else
		result = KERN_TERMINATED;

	/* Account externally-driven state changes against this thread. */
	if ((result == KERN_SUCCESS) && from_user)
		extmod_statistics_incr_thread_set_state(thread);

	thread_mtx_unlock(thread);

	return (result);
}
/*
 *	thread_abort_safely:
 *
 *	Abort the target thread, but only at a safe point.  If the thread
 *	is waiting at a safe point the wait is interrupted immediately;
 *	otherwise it is flagged as aborted and the special handler is
 *	installed to complete the abort later.
 */
kern_return_t
thread_abort_safely(
	thread_t		thread)
{
	kern_return_t	result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (!thread->at_safe_point ||
				clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			/*
			 * Not at a safe point (or the wait could not be cleared):
			 * mark the abort pending and queue the special handler,
			 * unless an abort is already in flight.
			 */
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				install_special_handler_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 *	thread_get_special_port:
 *
 *	Return a send right for one of the target thread's special ports.
 *	Only THREAD_KERNEL_PORT is recognized; any other selector is
 *	rejected with KERN_INVALID_ARGUMENT.  An inactive thread yields
 *	KERN_FAILURE.
 */
kern_return_t
thread_get_special_port(
	thread_t		thread,
	int				which,
	ipc_port_t		*portp)
{
	kern_return_t	kr = KERN_SUCCESS;
	ipc_port_t		*slot;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	/* Map the selector onto a port slot before taking the lock. */
	if (which == THREAD_KERNEL_PORT)
		slot = &thread->ith_sself;
	else
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (!thread->active)
		kr = KERN_FAILURE;
	else
		*portp = ipc_port_copy_send(*slot);

	thread_mtx_unlock(thread);

	return (kr);
}
/*
 * special_handler_continue
 *
 * Continuation routine for the special handler blocks.  It checks
 * to see whether there has been any new suspensions.  If so, it
 * installs the special handler again.  Otherwise, it checks to see
 * if the current depression needs to be re-instated (it may have
 * been temporarily removed in order to get to this point in a hurry).
 */
void
special_handler_continue(void)
{
	thread_t		thread = current_thread();

	thread_mtx_lock(thread);

	if (thread->suspend_count > 0)
		/* Suspended again while we were blocked: go around once more. */
		install_special_handler(thread);
	else {
		spl_t		s = splsched();

		thread_lock(thread);
		if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			processor_t		myprocessor = thread->last_processor;

			/* Re-instate the priority depression that was lifted. */
			thread->sched_pri = DEPRESSPRI;
			myprocessor->current_pri = thread->sched_pri;
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
/*
 *	thread_suspend:
 *
 *	Add a user-level suspension to the target thread.  The first
 *	user stop also takes the first suspension: it installs the
 *	special handler and, for another thread, wakes any waiter on
 *	the suspend count.  Kernel-task threads may not be suspended.
 */
kern_return_t
thread_suspend(
	register thread_t	thread)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		/*
		 * Note the short-circuit: suspend_count is only incremented
		 * when this is the first user stop.
		 */
		if (	thread->user_stop_count++ == 0		&&
				thread->suspend_count++ == 0		) {
			install_special_handler(thread);
			if (thread != self)
				thread_wakeup_one(&thread->suspend_count);
		}
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	/* Don't return until a suspended target has actually stopped. */
	if (thread != self && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 */
kern_return_t
thread_terminate_internal(
	thread_t			thread)
{
	kern_return_t		result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		/* Abort the thread so it notices the termination. */
		act_abort(thread);

		if (thread->started)
			clear_wait(thread, THREAD_INTERRUPTED);
		else {
			/* Never started: start it so it can run to termination. */
			thread_start_internal(thread);
		}
	}
	else
		result = KERN_TERMINATED;

	/* Detach from any affinity set regardless of prior state. */
	if (thread->affinity_set != NULL)
		thread_affinity_terminate(thread);

	thread_mtx_unlock(thread);

	/* Wait for another thread to stop before returning. */
	if (thread != current_thread() && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}
/*
 * special_handler	- handles suspension, termination.  Called
 * with nothing locked.  Returns (if it returns) the same way.
 */
void
special_handler(
	thread_t		thread)
{
	spl_t		s;

	thread_mtx_lock(thread);

	/* Clear any pending abort now that we are handling it. */
	s = splsched();
	thread_lock(thread);
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	/*
	 * If we're suspended, go to sleep and wait for someone to wake us up.
	 */
	if (thread->active) {
		if (thread->suspend_count > 0) {
			assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
			thread_mtx_unlock(thread);
			thread_block((thread_continue_t)special_handler_continue);
			/*NOTREACHED*/
		}
	}
	else {
		/* The thread has been terminated: tear ourselves down. */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
/*
 *	thread_resume:
 *
 *	Remove one user-level suspension from the target thread; the
 *	last one releases the hold taken by thread_suspend.  Resuming
 *	a thread with no user stops fails; kernel-task threads are
 *	rejected outright.
 */
kern_return_t
thread_resume(thread_t thread)
{
	kern_return_t	kr = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (!thread->active)
		kr = KERN_TERMINATED;
	else if (thread->user_stop_count <= 0)
		kr = KERN_FAILURE;
	else if (--thread->user_stop_count == 0)
		thread_release(thread);		/* last user stop: let it run */

	thread_mtx_unlock(thread);

	return (kr);
}
/*
 *	retrieve_thread_self_fast:
 *
 *	Return a send right for the current thread's self port.  When no
 *	interposing has occurred (ith_sself still names ith_self) take a
 *	fast path that manufactures the send right directly; otherwise
 *	copy the existing send right.
 */
ipc_port_t
retrieve_thread_self_fast(
	thread_t		thread)
{
	register ipc_port_t port;

	assert(thread == current_thread());

	thread_mtx_lock(thread);

	assert(thread->ith_self != IP_NULL);

	if ((port = thread->ith_sself) == thread->ith_self) {
		/* no interposing */

		/* Make the send right by hand: reference + srights bump. */
		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	}
	else
		port = ipc_port_copy_send(port);

	thread_mtx_unlock(thread);

	return port;
}
/*
 *	thread_resume:
 *
 *	Remove one user-level suspension from the target thread.  When
 *	the last user stop and the last suspension are both released,
 *	wake the thread (or start it for the first time if it has never
 *	run).  Kernel-task threads are rejected.
 */
kern_return_t
thread_resume(
	register thread_t	thread)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			/*
			 * Note the short-circuit: suspend_count is only
			 * decremented when the last user stop is removed.
			 */
			if (	--thread->user_stop_count == 0		&&
					--thread->suspend_count == 0		) {
				if (thread->started)
					thread_wakeup_one(&thread->suspend_count);
				else {
					thread_start_internal(thread);
				}
			}
		}
		else
			result = KERN_FAILURE;
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 *	thread_dup2:
 *
 *	Copy the machine state (and affinity, if any) of one thread onto
 *	another.  The target is held and stopped for the duration of the
 *	copy; an inactive source or target yields KERN_TERMINATED.
 */
kern_return_t
thread_dup2(
	thread_t	source,
	thread_t	target)
{
	kern_return_t		result = KERN_SUCCESS;
	uint32_t		active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source)
		return (KERN_INVALID_ARGUMENT);

	/* Snapshot the source's liveness and bail early if it is gone. */
	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target);

			/*
			 * NOTE(review): source->affinity_set is read here without
			 * holding the source's mutex — confirm this is intended.
			 */
			if (source->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(source, target);
			thread_unstop(target);
		}
		else {
			/* The stop was aborted; reacquire the mutex and fail. */
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
/*
 *	thread_static_param:
 *
 *	Record the given value in the thread's static_param flag,
 *	under the thread mutex.
 */
void
thread_static_param(
	thread_t		thread,
	boolean_t		state)
{
	thread_mtx_lock(thread);
	thread->static_param = state;
	thread_mtx_unlock(thread);
}
/*
 *	ipc_thread_reset:
 *
 *	Replace the thread's kernel port with a freshly allocated one and
 *	clear its non-privileged exception ports.  The displaced send
 *	rights are released — and the old kernel port destroyed — only
 *	after the thread mutex has been dropped.
 */
void
ipc_thread_reset(
	thread_t	thread)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int		   i;

	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_task_reset");

	thread_mtx_lock(thread);

	old_kport = thread->ith_self;

	if (old_kport == IP_NULL) {
		/* the thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		ipc_port_dealloc_kernel(new_kport);
		return;
	}

	thread->ith_self = new_kport;
	old_sself = thread->ith_sself;
	thread->ith_sself = ipc_port_make_send(new_kport);

	/* Detach the kobject from the old port; attach it to the new. */
	ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);

	/* Collect non-privileged exception ports for later release. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (!thread->exc_actions[i].privileged) {
			old_exc_actions[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = IP_NULL;
		} else {
			old_exc_actions[i] = IP_NULL;
		}
	}/* for */

	thread_mtx_unlock(thread);

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}/* for */

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
/*
 * Change thread's machine-dependent userspace TSD base.
 *  Called with nothing locked.  Returns same way.
 */
kern_return_t
thread_set_tsd_base(
	thread_t			thread,
	mach_vm_offset_t	tsd_base)
{
	kern_return_t		result = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold and stop the target before touching its TSD base. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			}
			else {
				/* The stop was aborted; reacquire the mutex and fail. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		}
		else
			result = machine_thread_set_tsd_base(thread, tsd_base);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 *	thread_dup:
 *
 *	Copy the current thread's machine state (and affinity, if any)
 *	onto the target thread.  The target is held and stopped for the
 *	duration of the copy.
 */
kern_return_t
thread_dup(
	register thread_t	target)
{
	thread_t			self = current_thread();
	kern_return_t		result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target);
			if (self->affinity_set != AFFINITY_SET_NULL)
				thread_affinity_dup(self, target);
			thread_unstop(target);
		}
		else {
			/* The stop was aborted; reacquire the mutex and fail. */
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(target);

	return (result);
}
/*
 *	thread_policy_destroy_work_interval:
 *
 *	Clear the thread's work interval, but only when the caller-supplied
 *	ID is non-zero and matches the currently assigned one.  Any other
 *	combination is rejected with KERN_INVALID_ARGUMENT.
 */
kern_return_t
thread_policy_destroy_work_interval(
	thread_t	thread,
	uint64_t	work_interval_id)
{
	kern_return_t	kr = KERN_INVALID_ARGUMENT;

	thread_mtx_lock(thread);

	/* Only tear down an interval that was assigned and matches. */
	if (work_interval_id != 0 &&
	    thread->work_interval_id != 0 &&
	    thread->work_interval_id == work_interval_id) {
		thread->work_interval_id = 0;
		kr = KERN_SUCCESS;
	}

	thread_mtx_unlock(thread);

	return (kr);
}
/*
 *	thread_policy_create_work_interval:
 *
 *	Assign the thread a fresh, globally unique work interval ID and
 *	return it through work_interval_id.  Fails with KERN_INVALID_VALUE
 *	if the thread already has one assigned.
 */
kern_return_t
thread_policy_create_work_interval(
	thread_t	thread,
	uint64_t	*work_interval_id)
{
	kern_return_t	kr;

	thread_mtx_lock(thread);

	if (thread->work_interval_id != 0) {
		/* Already assigned a work interval ID. */
		kr = KERN_INVALID_VALUE;
	} else {
		/* Draw the next ID from the global atomic counter. */
		thread->work_interval_id =
			OSIncrementAtomic64((volatile int64_t *)&unique_work_interval_id);
		*work_interval_id = thread->work_interval_id;
		kr = KERN_SUCCESS;
	}

	thread_mtx_unlock(thread);

	return (kr);
}
/*
 *	thread_create_running_internal2:
 *
 *	Create a thread in the given task, load its machine state from
 *	new_state, and start it running.  On success the new thread is
 *	returned through new_thread.
 *
 *	NOTE(review): the task lock and tasks_threads_lock released below
 *	appear to be acquired inside thread_create_internal — confirm
 *	against that routine.
 */
static kern_return_t
thread_create_running_internal2(
	register task_t         task,
	int                     flavor,
	thread_state_t          new_state,
	mach_msg_type_number_t  new_state_count,
	thread_t				*new_thread,
	boolean_t				from_user)
{
	register kern_return_t  result;
	thread_t				thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, TH_OPTION_NONE, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	result = machine_thread_set_state(
						thread, flavor, new_state, new_state_count);
	if (result != KERN_SUCCESS) {
		/* Undo the creation: drop locks, terminate, and deallocate. */
		task_unlock(task);
		lck_mtx_unlock(&tasks_threads_lock);

		thread_terminate(thread);
		thread_deallocate(thread);
		return (result);
	}

	thread_mtx_lock(thread);
	thread_start_internal(thread);
	thread_mtx_unlock(thread);

	/* Account externally-driven thread creation for the task. */
	if (from_user)
		extmod_statistics_incr_thread_create(task);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*new_thread = thread;

	return (result);
}
/*
 *	convert_thread_to_port:
 *
 *	Return a send right for the thread's kernel port (IP_NULL when the
 *	port is gone).  Consumes the caller's thread reference.
 */
ipc_port_t
convert_thread_to_port(
	thread_t		thread)
{
	ipc_port_t		port = IP_NULL;

	thread_mtx_lock(thread);
	if (thread->ith_self != IP_NULL)
		port = ipc_port_make_send(thread->ith_self);
	thread_mtx_unlock(thread);

	/* Drop the reference the caller handed us. */
	thread_deallocate(thread);

	return (port);
}
/*
 * thread_depress_abort:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
	register thread_t	thread)
{
	kern_return_t		kr = KERN_TERMINATED;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (thread->active)
		kr = thread_depress_abort_internal(thread);
	thread_mtx_unlock(thread);

	return (kr);
}
/* called with task lock locked */
void
thread_recompute_qos(thread_t thread)
{
	spl_t s;

	thread_mtx_lock(thread);

	/* Nothing to do for a terminated thread. */
	if (!thread->active) {
		thread_mtx_unlock(thread);
		return;
	}

	/* Recompute priority at splsched under the thread lock. */
	s = splsched();
	thread_lock(thread);

	thread_recompute_priority(thread);

	thread_unlock(thread);
	splx(s);

	thread_mtx_unlock(thread);
}
/*
 *	thread_suspended:
 *
 *	Continuation run when a suspended thread is woken (or has its
 *	wait interrupted).  If suspensions remain, re-arm the APC AST so
 *	the thread parks again; otherwise re-instate any priority
 *	depression before returning to user space.
 */
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	/* An interrupted wait means we are no longer parked. */
	if (result == THREAD_INTERRUPTED)
		thread->suspend_parked = FALSE;
	else
		assert(thread->suspend_parked == FALSE);

	if (thread->suspend_count > 0) {
		/* Still suspended: take the APC path back to parking. */
		thread_set_apc_ast(thread);
	} else {
		spl_t s = splsched();

		thread_lock(thread);
		if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			/* Re-apply the depressed priority on the last processor. */
			thread->sched_pri = DEPRESSPRI;
			thread->last_processor->current_pri = thread->sched_pri;
			thread->last_processor->current_perfctl_class = thread_get_perfcontrol_class(thread);

			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY),
			                      (uintptr_t)thread_tid(thread),
			                      thread->base_pri,
			                      thread->sched_pri,
			                      0, /* eventually, 'reason' */
			                      0);
		}
		thread_unlock(thread);
		splx(s);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
/*
 *	kernel_thread_start_priority:
 *
 *	Create a kernel thread at the given priority running the supplied
 *	continuation, start it, and return it through new_thread.
 */
kern_return_t
kernel_thread_start_priority(
	thread_continue_t	continuation,
	void				*parameter,
	integer_t			priority,
	thread_t			*new_thread)
{
	thread_t		thread;
	kern_return_t	kr;

	kr = kernel_thread_create(continuation, parameter, priority, &thread);
	if (kr != KERN_SUCCESS)
		return (kr);

	*new_thread = thread;

	/* Kick the new thread off under its mutex. */
	thread_mtx_lock(thread);
	thread_start_internal(thread);
	thread_mtx_unlock(thread);

	return (kr);
}
/*
 *	thread_abort:
 *
 *	Abort the target thread unconditionally: flag the abort and
 *	interrupt any wait it is in.
 */
kern_return_t
thread_abort(
	register thread_t	thread)
{
	kern_return_t	kr = KERN_SUCCESS;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active) {
		kr = KERN_TERMINATED;
	} else {
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	}
	thread_mtx_unlock(thread);

	return (kr);
}
/*
 *	thread_set_special_port:
 *
 *	Install a new port in one of the target thread's special-port
 *	slots.  Only THREAD_KERNEL_PORT is recognized.  The displaced
 *	send right, if any, is released after the thread mutex is dropped.
 */
kern_return_t
thread_set_special_port(
	thread_t		thread,
	int				which,
	ipc_port_t		port)
{
	kern_return_t	kr = KERN_SUCCESS;
	ipc_port_t		*slot, previous = IP_NULL;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	/* Map the selector onto a port slot before taking the lock. */
	if (which == THREAD_KERNEL_PORT)
		slot = &thread->ith_sself;
	else
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (thread->active) {
		previous = *slot;
		*slot = port;
	}
	else
		kr = KERN_FAILURE;
	thread_mtx_unlock(thread);

	/* Drop the displaced send right outside the lock. */
	if (IP_VALID(previous))
		ipc_port_release_send(previous);

	return (kr);
}
/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	/* Clear any pending abort now that we are handling it. */
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
		thread_mtx_unlock(thread);

		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
/*
 *	act_execute_returnhandlers:
 *
 *	Run the current thread's queued ReturnHandlers one at a time.
 *	Each handler is unlinked under both the thread mutex and the
 *	spl-protected thread lock, then invoked with no locks held.
 */
void
act_execute_returnhandlers(void)
{
	thread_t	thread = current_thread();

	thread_ast_clear(thread, AST_APC);
	spllo();

	for (;;) {
		ReturnHandler	*rh;

		thread_mtx_lock(thread);

		(void)splsched();
		thread_lock(thread);

		rh = thread->handlers;
		if (rh != NULL) {
			/* Unlink the handler, drop all locks, then run it. */
			thread->handlers = rh->next;

			thread_unlock(thread);
			spllo();

			thread_mtx_unlock(thread);

			/* Execute it */
			(*rh->handler)(rh, thread);
		}
		else
			break;
	}

	/* The break path leaves both locks held: release them here. */
	thread_unlock(thread);
	spllo();

	thread_mtx_unlock(thread);
}
/*
 *	thread_suspend:
 *
 *	Add a user-level suspension to the target thread; the first one
 *	takes a hold on it.  Does not return until another thread has
 *	actually stopped.  Kernel-task threads are rejected.
 */
kern_return_t
thread_suspend(thread_t thread)
{
	kern_return_t	kr = KERN_SUCCESS;

	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (!thread->active)
		kr = KERN_TERMINATED;
	else if (thread->user_stop_count++ == 0)
		thread_hold(thread);	/* first user stop takes the hold */
	thread_mtx_unlock(thread);

	/* Wait for another thread to come to a stop before returning. */
	if (kr == KERN_SUCCESS && thread != current_thread())
		thread_wait(thread, FALSE);

	return (kr);
}
/*
 *	thread_info:
 *
 *	Return information about the target thread for the requested
 *	flavor.  An inactive thread yields KERN_TERMINATED.
 */
kern_return_t
thread_info(
	thread_t				thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	kern_return_t	kr = KERN_TERMINATED;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);
	if (thread->active)
		kr = thread_info_internal(thread, flavor,
		    thread_info_out, thread_info_count);
	thread_mtx_unlock(thread);

	return (kr);
}