/*
 * Routine: lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait(
    lck_mtx_t   *lck,
    thread_t    holder)
{
    thread_t    self = current_thread();
    lck_mtx_t   *mutex;
    __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);
    __kdebug_only uintptr_t trace_holder = VM_KERNEL_UNSLIDE_OR_PERM(holder);
    integer_t   priority;
    spl_t       s = splsched();
#if CONFIG_DTRACE
    uint64_t    sleep_start = 0;

    if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] ||
        lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
        sleep_start = mach_absolute_time();
    }
#endif

    if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
        mutex = lck;
    else
        mutex = &lck->lck_mtx_ptr->lck_mtx;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START,
        trace_lck, trace_holder, 0, 0, 0);

    /*
     * Compute the priority to promote the holder to: the waiter's effective
     * priority, raised to at least its base priority and BASEPRI_DEFAULT.
     */
    priority = self->sched_pri;
    if (priority < self->base_pri)
        priority = self->base_pri;
    if (priority < BASEPRI_DEFAULT)
        priority = BASEPRI_DEFAULT;

    /* Do not promote past promotion ceiling */
    priority = MIN(priority, MAXPRI_PROMOTE);

    thread_lock(holder);
    if (mutex->lck_mtx_pri == 0)
        holder->promotions++;
    holder->sched_flags |= TH_SFLAG_PROMOTED;
    if (mutex->lck_mtx_pri < priority && holder->sched_pri < priority) {
        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTE) | DBG_FUNC_NONE,
            holder->sched_pri, priority, trace_holder, trace_lck, 0);
        set_sched_pri(holder, priority);
    }
    thread_unlock(holder);
    splx(s);

    if (mutex->lck_mtx_pri < priority)
        mutex->lck_mtx_pri = priority;

    /* Count ourselves as a waiter on this mutex, at most once per mutex. */
    if (self->pending_promoter[self->pending_promoter_index] == NULL) {
        self->pending_promoter[self->pending_promoter_index] = mutex;
        mutex->lck_mtx_waiters++;
    } else if (self->pending_promoter[self->pending_promoter_index] != mutex) {
        self->pending_promoter[++self->pending_promoter_index] = mutex;
        mutex->lck_mtx_waiters++;
    }

    /*
     * Sleep on the mutex's wait event; the interlock is dropped before
     * blocking and, per the contract above, is not retaken on return.
     */
    assert_wait(LCK_MTX_EVENT(mutex), THREAD_UNINT);
    lck_mtx_ilk_unlock(mutex);

    thread_block(THREAD_CONTINUE_NULL);

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END,
        0, 0, 0, 0, 0);

#if CONFIG_DTRACE
    /*
     * Record the Dtrace lockstat probe for blocking, block time
     * measured from when we were entered.
     */
    if (sleep_start) {
        if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
            LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
                mach_absolute_time() - sleep_start);
        } else {
            LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, lck,
                mach_absolute_time() - sleep_start);
        }
    }
#endif
}
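The promotion priority that the listing above hands to the holder is simply the waiter's effective priority, floored at its base priority and at BASEPRI_DEFAULT, then capped at MAXPRI_PROMOTE. A minimal user-space sketch of that clamp follows; the numeric constants and the helper name promotion_priority are stand-ins for illustration, not the kernel's definitions.

/*
 * Stand-alone sketch of the promotion-priority clamp performed by
 * lck_mtx_lock_wait above.  The constants are assumed values for
 * illustration; the kernel takes them from its scheduler headers.
 */
#include <stdio.h>

#define BASEPRI_DEFAULT  31   /* assumed floor for the promotion */
#define MAXPRI_PROMOTE   95   /* assumed promotion ceiling */

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Mirror of the clamp sequence in the routine above. */
static int
promotion_priority(int sched_pri, int base_pri)
{
    int priority = sched_pri;

    if (priority < base_pri)          /* never promote below our base priority */
        priority = base_pri;
    if (priority < BASEPRI_DEFAULT)   /* ...nor below the default base priority */
        priority = BASEPRI_DEFAULT;

    /* Do not promote past the promotion ceiling. */
    return MIN(priority, MAXPRI_PROMOTE);
}

int
main(void)
{
    /* A depressed waiter (sched_pri 20, base_pri 40) still hands out 40. */
    printf("%d\n", promotion_priority(20, 40));
    /* A very high-priority waiter is clamped at the ceiling. */
    printf("%d\n", promotion_priority(120, 120));
    return 0;
}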
/*
 * Routine: lck_mtx_lock_wait
 *
 * Invoked in order to wait on contention.
 *
 * Called with the interlock locked and
 * returns it unlocked.
 */
void
lck_mtx_lock_wait(
    lck_mtx_t   *lck,
    thread_t    holder)
{
    thread_t    self = current_thread();
    lck_mtx_t   *mutex;
    integer_t   priority;
    spl_t       s = splsched();
#if CONFIG_DTRACE
    uint64_t    sleep_start = 0;

    if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] ||
        lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
        sleep_start = mach_absolute_time();
    }
#endif

    if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
        mutex = lck;
    else
        mutex = &lck->lck_mtx_ptr->lck_mtx;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START,
        (int)lck, (int)holder, 0, 0, 0);

    priority = self->sched_pri;
    if (priority < self->priority)
        priority = self->priority;
    if (priority < BASEPRI_DEFAULT)
        priority = BASEPRI_DEFAULT;

    thread_lock(holder);
    if (mutex->lck_mtx_pri == 0)
        holder->promotions++;
    holder->sched_mode |= TH_MODE_PROMOTED;
    if (mutex->lck_mtx_pri < priority && holder->sched_pri < priority) {
        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTE) | DBG_FUNC_NONE,
            holder->sched_pri, priority, (int)holder, (int)lck, 0);
        set_sched_pri(holder, priority);
    }
    thread_unlock(holder);
    splx(s);

    if (mutex->lck_mtx_pri < priority)
        mutex->lck_mtx_pri = priority;
    if (self->pending_promoter[self->pending_promoter_index] == NULL) {
        self->pending_promoter[self->pending_promoter_index] = mutex;
        mutex->lck_mtx_waiters++;
    } else if (self->pending_promoter[self->pending_promoter_index] != mutex) {
        self->pending_promoter[++self->pending_promoter_index] = mutex;
        mutex->lck_mtx_waiters++;
    }

    assert_wait((event_t)(((unsigned int *)lck) +
        ((sizeof(lck_mtx_t) - 1) / sizeof(unsigned int))), THREAD_UNINT);
    lck_mtx_ilk_unlock(mutex);

    thread_block(THREAD_CONTINUE_NULL);

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END,
        0, 0, 0, 0, 0);

#if CONFIG_DTRACE
    /*
     * Record the Dtrace lockstat probe for blocking, block time
     * measured from when we were entered.
     */
    if (sleep_start) {
        if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
            LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
                mach_absolute_time() - sleep_start);
        } else {
            LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, lck,
                mach_absolute_time() - sleep_start);
        }
    }
#endif
}
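The second listing, which appears to be an earlier revision of the same routine, computes its wait event inline rather than through LCK_MTX_EVENT(): it takes the address of the last whole unsigned int inside the lck_mtx_t and uses it as the per-lock wait channel. A small stand-alone sketch of that pointer arithmetic follows; the lck_mtx_t layout here is an assumed stand-in, since only the structure's size matters for the computation.

/*
 * Sketch of the wait-event address built by the assert_wait() call in the
 * older listing above.  This lck_mtx_t is a stand-in, not the kernel's.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct {              /* stand-in layout for the kernel's lck_mtx_t */
    uintptr_t lck_mtx_data;
    uint32_t  lck_mtx_waiters;
    uint32_t  lck_mtx_pri;
} lck_mtx_t;

int
main(void)
{
    lck_mtx_t lock;
    lck_mtx_t *lck = &lock;

    /*
     * Same expression as in the assert_wait() above: step to the last whole
     * unsigned int that fits inside the structure and use its address as the
     * wait event for this lock.
     */
    void *event = (void *)(((unsigned int *)lck) +
        ((sizeof(lck_mtx_t) - 1) / sizeof(unsigned int)));

    printf("lock at %p, event at %p (offset %zu bytes)\n",
        (void *)lck, event, (size_t)((char *)event - (char *)lck));
    return 0;
}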