/*
 * lck_rw_clear_promotion: Undo priority promotions when the last RW
 * lock is released by a thread (if a promotion was active)
 */
void
lck_rw_clear_promotion(thread_t thread)
{
	assert(thread->rwlock_count == 0);

	/* Cancel any promotions if the thread had actually blocked while holding a RW lock */
	spl_t istate = splsched();
	thread_lock(thread);

	if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
		thread->sched_flags &= ~TH_SFLAG_RW_PROMOTED;

		if (!(thread->sched_flags & TH_SFLAG_PROMOTED)) {
			/* No mutex promotion outstanding; drop the boosted priority */
			if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
				KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE) | DBG_FUNC_NONE,
				    thread->sched_pri, DEPRESSPRI, 0, 0, 0);
				set_sched_pri(thread, DEPRESSPRI);
			} else {
				KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE) | DBG_FUNC_NONE,
				    thread->sched_pri, thread->base_pri, 0, 0, 0);
				thread_recompute_sched_pri(thread, FALSE);
			}
		}
		/* else: a mutex promotion is still in effect; leave priority alone */
	}

	thread_unlock(thread);
	splx(istate);
}
/*
 * install_special_handler_locked:
 *
 * Do the work of installing the special_handler.
 *
 * Called with the thread mutex and scheduling lock held.
 */
void
install_special_handler_locked(
	thread_t		thread)
{
	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in special_handler().
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
		thread_recompute_sched_pri(thread, TRUE);
	}

	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		ast_propagate(thread->ast);
	} else {
		/* Kick the remote processor only if the target is on-core there right now */
		processor_t last = thread->last_processor;

		if (last != PROCESSOR_NULL &&
		    last->state == PROCESSOR_RUNNING &&
		    last->active_thread == thread) {
			cause_ast_check(last);
		}
	}
}
/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
	/*
	 * Temporarily undepress, so target has
	 * a chance to do locking required to
	 * block itself in thread_suspended.
	 *
	 * Leaves the depress flag set so we can reinstate when it's blocked.
	 */
	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
		thread_recompute_sched_pri(thread, TRUE);
	}

	thread_ast_set(thread, AST_APC);

	if (thread != current_thread()) {
		/* Poke the target's processor only if it is actively running there */
		processor_t last_cpu = thread->last_processor;

		if (last_cpu != PROCESSOR_NULL &&
		    last_cpu->state == PROCESSOR_RUNNING &&
		    last_cpu->active_thread == thread) {
			cause_ast_check(last_cpu);
		}
	} else {
		ast_propagate(thread);
	}
}
/*
 * Priority depression expiration.
 */
void
thread_depress_expire(
	void			*p0,
	__unused void		*p1)
{
	thread_t thread = p0;

	spl_t state = splsched();
	thread_lock(thread);

	/* Only the last outstanding expiration actually lifts the depression */
	if (--thread->depress_timer_active == 0) {
		thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
		thread_recompute_sched_pri(thread, FALSE);
	}

	thread_unlock(thread);
	splx(state);
}
/*
 * Routine: lck_mtx_clear_promoted
 *
 * Handle clearing of TH_SFLAG_PROMOTED,
 * adjusting thread priority as needed.
 *
 * Called with thread lock held
 */
static void
lck_mtx_clear_promoted (
	thread_t			thread,
	__kdebug_only uintptr_t		trace_lck)
{
	thread->sched_flags &= ~TH_SFLAG_PROMOTED;

	if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
		/* Thread still has a RW lock promotion; leave priority as-is */
		return;
	}

	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
			thread->sched_pri, DEPRESSPRI, 0, trace_lck, 0);
		set_sched_pri(thread, DEPRESSPRI);
	} else {
		/* Trace the demotion only when the promotion actually raised sched_pri */
		if (thread->base_pri < thread->sched_pri) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
				thread->sched_pri, thread->base_pri, 0, trace_lck, 0);
		}
		thread_recompute_sched_pri(thread, FALSE);
	}
}
/*
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_internal(
	thread_t		thread)
{
	kern_return_t result = KERN_NOT_DEPRESSED;

	spl_t s = splsched();
	thread_lock(thread);

	/* Poll-depression is not abortable through this path */
	if (!(thread->sched_flags & TH_SFLAG_POLLDEPRESS)) {
		if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;
			thread_recompute_sched_pri(thread, FALSE);
			result = KERN_SUCCESS;
		}

		/* If the pending expiration was cancelled, drop its reference */
		if (timer_call_cancel(&thread->depress_timer)) {
			thread->depress_timer_active--;
		}
	}

	thread_unlock(thread);
	splx(s);

	return result;
}