/*
 * lck_rw_clear_promotion: Undo priority promotions when the last RW
 * lock is released by a thread (if a promotion was active)
 *
 * Must only be called once the thread's rwlock_count has dropped to zero
 * (asserted below).  Takes the thread lock at splsched itself, so the
 * caller holds neither.
 */
void
lck_rw_clear_promotion(thread_t thread)
{
	/* Precondition: the caller released the thread's last RW lock. */
	assert(thread->rwlock_count == 0);

	/* Cancel any promotions if the thread had actually blocked while holding a RW lock */
	spl_t s = splsched();

	thread_lock(thread);

	if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
		thread->sched_flags &= ~TH_SFLAG_RW_PROMOTED;

		if (thread->sched_flags & TH_SFLAG_PROMOTED) {
			/* Thread still has a mutex promotion */
			/* Leave sched_pri alone; the mutex demotion path will fix it up. */
		} else if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
			/* Thread is priority-depressed: fall back to DEPRESSPRI. */
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE) | DBG_FUNC_NONE,
					      thread->sched_pri, DEPRESSPRI, 0, 0, 0);

			set_sched_pri(thread, DEPRESSPRI);
		} else {
			/* No other boosts active: recompute from base_pri. */
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE) | DBG_FUNC_NONE,
					      thread->sched_pri,
					      thread->base_pri, 0, 0, 0);

			thread_recompute_sched_pri(thread, FALSE);
		}
	}

	thread_unlock(thread);
	splx(s);
}
/*
 * Callout from context switch if the thread goes
 * off core with a positive rwlock_count
 *
 * Called at splsched with the thread locked
 *
 * Boosts the thread to at least max(sched_pri, base_pri,
 * BASEPRI_BACKGROUND) and marks it TH_SFLAG_RW_PROMOTED so the
 * promotion can be undone by lck_rw_clear_promotion().
 */
void
lck_rw_set_promotion_locked(thread_t thread)
{
	integer_t promote_pri;

	/* RW-lock priority boosting may be disabled globally. */
	if (LcksOpts & disLkRWPrio)
		return;

	/* Target priority: current pri, floored at base_pri and background. */
	promote_pri = thread->sched_pri;
	if (promote_pri < thread->base_pri)
		promote_pri = thread->base_pri;
	if (promote_pri < BASEPRI_BACKGROUND)
		promote_pri = BASEPRI_BACKGROUND;

	/* Already marked promoted and running at/above target: nothing to do. */
	if ((thread->sched_pri >= promote_pri) &&
	    (thread->sched_flags & TH_SFLAG_RW_PROMOTED))
		return;

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_PROMOTE) | DBG_FUNC_NONE,
		(uintptr_t)thread_tid(thread), thread->sched_pri,
		thread->base_pri, promote_pri, 0);

	thread->sched_flags |= TH_SFLAG_RW_PROMOTED;

	if (thread->sched_pri < promote_pri)
		set_sched_pri(thread, promote_pri);
}
/*
 * Routine: lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 *
 * Wakes exactly one waiter and, if this unlock drops the holder's last
 * mutex promotion, demotes the holder back to its normal priority.
 */
void
lck_mtx_unlock_wakeup (
	lck_mtx_t	*lck,
	thread_t	holder)
{
	thread_t	thread = current_thread();
	lck_mtx_t	*mutex;

	/* Indirect locks keep their real state behind lck_mtx_ptr. */
	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	/* Only the recorded holder may perform the contended unlock. */
	if (thread != holder)
		panic("lck_mtx_unlock_wakeup: mutex %p holder %p\n", mutex, holder);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	assert(mutex->lck_mtx_waiters > 0);
	/*
	 * Wake one waiter.  The wait event is an address just past the end of
	 * the lck_mtx_t, matching the assert_wait() in lck_mtx_lock_wait().
	 */
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	if (thread->promotions > 0) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (	--thread->promotions == 0				&&
				(thread->sched_flags & TH_SFLAG_PROMOTED)		) {
			/* Last promotion dropped: clear the flag and demote. */
			thread->sched_flags &= ~TH_SFLAG_PROMOTED;

			if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
				/* Thread still has a RW lock promotion */
			} else if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
						  thread->sched_pri, DEPRESSPRI, 0, lck, 0);

				set_sched_pri(thread, DEPRESSPRI);
			}
			else {
				/*
				 * NOTE(review): this path compares thread->priority where
				 * other variants in this file use base_pri — looks like an
				 * older source vintage; confirm field name for this tree.
				 */
				if (thread->priority < thread->sched_pri) {
					KERNEL_DEBUG_CONSTANT(
						MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) |
															DBG_FUNC_NONE,
							  thread->sched_pri, thread->priority,
									0, lck, 0);
				}

				SCHED(compute_priority)(thread, FALSE);
			}
		}
		thread_unlock(thread);
		splx(s);
	}

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
/*
 * Routine: 	lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 *
 * If we were a recorded pending promoter for this mutex, retire that
 * record; if waiters remain, inherit the mutex's promotion priority.
 */
int
lck_mtx_lock_acquire(
	lck_mtx_t		*lck)
{
	thread_t		thread = current_thread();
	lck_mtx_t		*mutex;

	/* Resolve indirect locks to the real mutex state. */
	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	/*
	 * If this thread registered itself as a pending promoter for this
	 * mutex in lck_mtx_lock_wait(), clear the slot and drop the waiter
	 * count it contributed.
	 */
	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->lck_mtx_waiters--;
	}

	if (mutex->lck_mtx_waiters > 0) {
		/* Other waiters remain: take over the mutex's promotion. */
		integer_t		priority = mutex->lck_mtx_pri;
		spl_t			s = splsched();

		thread_lock(thread);
		thread->promotions++;
		thread->sched_flags |= TH_SFLAG_PROMOTED;
		if (thread->sched_pri < priority) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					  thread->sched_pri, priority, 0, lck, 0);
			/* Do not promote past promotion ceiling */
			assert(priority <= MAXPRI_PROMOTE);
			set_sched_pri(thread, priority);
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		/* No waiters left: the mutex carries no promotion priority. */
		mutex->lck_mtx_pri = 0;

#if CONFIG_DTRACE
	if (lockstat_probemap[LS_LCK_MTX_LOCK_ACQUIRE] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_ACQUIRE]) {
		if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
			LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, lck, 0);
		} else {
			LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, lck, 0);
		}
	}
#endif
	return (mutex->lck_mtx_waiters);
}
/*
 * Routine: 	lck_mtx_lock_acquire
 *
 * Invoked on acquiring the mutex when there is
 * contention.
 *
 * Returns the current number of waiters.
 *
 * Called with the interlock locked.
 *
 * NOTE(review): this is an older-vintage duplicate of the
 * lck_mtx_lock_acquire defined above (it sets TH_MODE_PROMOTED in
 * sched_mode rather than TH_SFLAG_PROMOTED in sched_flags, and lacks
 * the DTrace lockstat probes).  Two definitions of the same symbol
 * cannot coexist in one translation unit — confirm which version this
 * tree actually builds.
 */
int
lck_mtx_lock_acquire(
	lck_mtx_t		*lck)
{
	thread_t		thread = current_thread();
	lck_mtx_t		*mutex;

	/* Resolve indirect locks to the real mutex state. */
	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	/* Retire this thread's pending-promoter record for this mutex. */
	if (thread->pending_promoter[thread->pending_promoter_index] == mutex) {
		thread->pending_promoter[thread->pending_promoter_index] = NULL;
		if (thread->pending_promoter_index > 0)
			thread->pending_promoter_index--;
		mutex->lck_mtx_waiters--;
	}

	if (mutex->lck_mtx_waiters > 0) {
		/* Other waiters remain: inherit the mutex's promotion priority. */
		integer_t		priority = mutex->lck_mtx_pri;
		spl_t			s = splsched();

		thread_lock(thread);
		thread->promotions++;
		thread->sched_mode |= TH_MODE_PROMOTED;
		if (thread->sched_pri < priority) {
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					  thread->sched_pri, priority, 0, (int)lck, 0);

			set_sched_pri(thread, priority);
		}
		thread_unlock(thread);
		splx(s);
	}
	else
		/* No waiters left: clear the mutex's promotion priority. */
		mutex->lck_mtx_pri = 0;

	return (mutex->lck_mtx_waiters);
}
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result,
	int priority)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	/*
	 * Select a waiter for this event.  The selected thread is returned
	 * locked (thread_unlock below relies on that), so it is safe to
	 * drop the queue lock before operating on it.
	 */
	thread = _wait_queue_select64_one(wq, CAST_DOWN(event64_t,event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		if (thread->sched_pri < priority) {
			if (priority <= MAXPRI) {
				/* Boost the woken thread up to the requested priority. */
				set_sched_pri(thread, priority);

				thread->was_promoted_on_wakeup = 1;
				/*
				 * NOTE(review): TH_SFLAG_PROMOTED is set here without a
				 * matching thread->promotions increment, unlike the mutex
				 * promotion paths — presumably undone via the
				 * was_promoted_on_wakeup flag; confirm the demotion path.
				 */
				thread->sched_flags |= TH_SFLAG_PROMOTED;
			}
		}

		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
/*
 * Routine: 	lck_mtx_clear_promoted
 *
 * Handle clearing of TH_SFLAG_PROMOTED,
 * adjusting thread priority as needed.
 *
 * Called with thread lock held
 */
static void
lck_mtx_clear_promoted (
	thread_t 			thread,
	__kdebug_only uintptr_t		trace_lck)
{
	thread->sched_flags &= ~TH_SFLAG_PROMOTED;

	if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
		/* Thread still has a RW lock promotion — leave priority alone. */
		return;
	}

	if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
		/* Thread is depressed: return it to DEPRESSPRI. */
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
			thread->sched_pri, DEPRESSPRI, 0, trace_lck, 0);
		set_sched_pri(thread, DEPRESSPRI);
		return;
	}

	/* No other boosts: trace the demotion (if any) and recompute. */
	if (thread->base_pri < thread->sched_pri) {
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE,
			thread->sched_pri, thread->base_pri, 0, trace_lck, 0);
	}
	thread_recompute_sched_pri(thread, FALSE);
}
/*
 * Routine: 	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 *
 * Promotes the current holder up to the waiter's effective priority
 * (capped at MAXPRI_PROMOTE), records this thread as a pending
 * promoter, then blocks until woken by lck_mtx_unlock_wakeup().
 */
void
lck_mtx_lock_wait (
	lck_mtx_t			*lck,
	thread_t			holder)
{
	thread_t		self = current_thread();
	lck_mtx_t		*mutex;
	__kdebug_only uintptr_t	trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);
	__kdebug_only uintptr_t	trace_holder = VM_KERNEL_UNSLIDE_OR_PERM(holder);
	integer_t		priority;
	spl_t			s = splsched();
#if	CONFIG_DTRACE
	uint64_t		sleep_start = 0;

	if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
		sleep_start = mach_absolute_time();
	}
#endif

	/* Resolve indirect locks to the real mutex state. */
	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, trace_lck, trace_holder, 0, 0, 0);

	/*
	 * Promotion target: our effective priority, floored at base_pri and
	 * BASEPRI_DEFAULT, capped at the promotion ceiling.
	 */
	priority = self->sched_pri;

	if (priority < self->base_pri)
		priority = self->base_pri;
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	/* Do not promote past promotion ceiling */
	priority = MIN(priority, MAXPRI_PROMOTE);

	thread_lock(holder);
	if (mutex->lck_mtx_pri == 0)
		/* First waiter to promote through this mutex. */
		holder->promotions++;
	holder->sched_flags |= TH_SFLAG_PROMOTED;
	if (mutex->lck_mtx_pri < priority && holder->sched_pri < priority) {
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					holder->sched_pri, priority, trace_holder, trace_lck, 0);
		set_sched_pri(holder, priority);
	}
	thread_unlock(holder);
	splx(s);

	/* Record the highest priority propagated through this mutex. */
	if (mutex->lck_mtx_pri < priority)
		mutex->lck_mtx_pri = priority;
	/*
	 * Register as a pending promoter (at most one new registration per
	 * mutex); each registration contributes one to the waiter count.
	 */
	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}
	else
	if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}

	/* Sleep on the mutex event; the interlock is released before blocking. */
	assert_wait(LCK_MTX_EVENT(mutex), THREAD_UNINT);
	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
#if	CONFIG_DTRACE
	/*
	 * Record the Dtrace lockstat probe for blocking, block time
	 * measured from when we were entered.
	 */
	if (sleep_start) {
		if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
			LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		} else {
			LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		}
	}
#endif
}
/*
 * Routine: 	lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 *
 * NOTE(review): older-vintage duplicate of the lck_mtx_lock_wait above
 * (uses TH_MODE_PROMOTED in sched_mode, self->priority instead of
 * base_pri, no MAXPRI_PROMOTE cap, and an open-coded wait event).  Two
 * definitions of the same symbol cannot coexist in one translation
 * unit — confirm which version this tree actually builds.
 */
void
lck_mtx_lock_wait (
	lck_mtx_t			*lck,
	thread_t			holder)
{
	thread_t		self = current_thread();
	lck_mtx_t		*mutex;
	integer_t		priority;
	spl_t			s = splsched();
#if	CONFIG_DTRACE
	uint64_t		sleep_start = 0;

	if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
		sleep_start = mach_absolute_time();
	}
#endif

	/* Resolve indirect locks to the real mutex state. */
	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	/* Promotion target: effective priority floored at BASEPRI_DEFAULT. */
	priority = self->sched_pri;
	if (priority < self->priority)
		priority = self->priority;
	if (priority < BASEPRI_DEFAULT)
		priority = BASEPRI_DEFAULT;

	thread_lock(holder);
	if (mutex->lck_mtx_pri == 0)
		/* First waiter to promote through this mutex. */
		holder->promotions++;
	holder->sched_mode |= TH_MODE_PROMOTED;
	if (		mutex->lck_mtx_pri < priority	&&
				holder->sched_pri < priority		) {
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_PROMOTE) | DBG_FUNC_NONE,
					holder->sched_pri, priority, (int)holder, (int)lck, 0);

		set_sched_pri(holder, priority);
	}
	thread_unlock(holder);
	splx(s);

	/* Record the highest priority propagated through this mutex. */
	if (mutex->lck_mtx_pri < priority)
		mutex->lck_mtx_pri = priority;
	/* Register as a pending promoter; each registration adds one waiter. */
	if (self->pending_promoter[self->pending_promoter_index] == NULL) {
		self->pending_promoter[self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}
	else
	if (self->pending_promoter[self->pending_promoter_index] != mutex) {
		self->pending_promoter[++self->pending_promoter_index] = mutex;
		mutex->lck_mtx_waiters++;
	}

	/*
	 * Sleep on an event address just past the end of the lck_mtx_t,
	 * matching the thread_wakeup_one() in lck_mtx_unlock_wakeup().
	 * The interlock is released before blocking.
	 */
	assert_wait((event_t)(((unsigned int*)lck)+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))), THREAD_UNINT);
	lck_mtx_ilk_unlock(mutex);

	thread_block(THREAD_CONTINUE_NULL);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
#if	CONFIG_DTRACE
	/*
	 * Record the Dtrace lockstat probe for blocking, block time
	 * measured from when we were entered.
	 */
	if (sleep_start) {
		if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
			LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		} else {
			LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, lck,
			    mach_absolute_time() - sleep_start);
		}
	}
#endif
}