/*
 *	thread_resume:
 *
 *	Undo one user-level suspension of a thread (inverse of
 *	thread_suspend).  Decrements the user stop count; when both it and
 *	the overall suspend count reach zero the thread is made runnable
 *	again.
 *
 *	Returns:
 *		KERN_SUCCESS		a user stop was removed
 *		KERN_FAILURE		the thread was not user-suspended
 *		KERN_TERMINATED		the thread is no longer active
 *		KERN_INVALID_ARGUMENT	NULL thread or kernel-task thread
 */
kern_return_t
thread_resume(
	register thread_t	thread)
{
	kern_return_t	result = KERN_SUCCESS;

	/* Kernel-task threads cannot be suspended/resumed via this interface. */
	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			/*
			 * Short-circuit order matters: suspend_count is only
			 * decremented when user_stop_count drops to zero.
			 */
			if (	--thread->user_stop_count == 0 &&
					--thread->suspend_count == 0 ) {
				if (thread->started)
					/* Wake the thread sleeping on the suspend-count event. */
					thread_wakeup_one(&thread->suspend_count);
				else {
					/* Thread was suspended before it ever ran; start it now. */
					thread_start_internal(thread);
				}
			}
		}
		else
			result = KERN_FAILURE;		/* not user-suspended */
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	return (result);
}
/*
 *	thread_suspend:
 *
 *	Apply one user-level suspension to a thread.  On the first user
 *	stop (and first overall suspend) a special handler is installed
 *	and, if suspending another thread, that thread is woken so it can
 *	act on the suspension.  Unless suspending the caller itself, waits
 *	for the target to stop before returning.
 *
 *	Returns:
 *		KERN_SUCCESS		the stop count was incremented
 *		KERN_TERMINATED		the thread is no longer active
 *		KERN_INVALID_ARGUMENT	NULL thread or kernel-task thread
 */
kern_return_t
thread_suspend(
	register thread_t	thread)
{
	thread_t		self = current_thread();
	kern_return_t	result = KERN_SUCCESS;

	/* Kernel-task threads cannot be suspended via this interface. */
	if (thread == THREAD_NULL || thread->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (thread->active) {
		/* Only the 0 -> 1 transition installs the handler and wakes the target. */
		if (	thread->user_stop_count++ == 0 &&
				thread->suspend_count++ == 0 ) {
			install_special_handler(thread);
			if (thread != self)
				thread_wakeup_one(&thread->suspend_count);
		}
	}
	else
		result = KERN_TERMINATED;

	thread_mtx_unlock(thread);

	/*
	 * Wait (outside the thread mutex) for the target to actually stop;
	 * never wait on ourselves.
	 */
	if (thread != self && result == KERN_SUCCESS)
		thread_wait(thread, FALSE);

	return (result);
}
/*
 *	mutex_unlock_wakeup:
 *
 *	Contended-unlock slow path: account for one departing waiter and
 *	wake a single thread blocked on the mutex (the mutex address is
 *	the wait event).  A waiter must exist when this is called.
 */
void
mutex_unlock_wakeup (
	mutex_t		*m)
{
	assert(m->waiters);

	--m->waiters;
	thread_wakeup_one((event_t) m);
}
// Free is called twice: // First when the atomic retainCount transitions from 1 -> 0 // Secondly when the work loop itself is commiting hari kari // Hence the each leg of the free must be single threaded. void IOWorkLoop::free() { if (workThread) { IOInterruptState is; // If we are here then we must be trying to shut down this work loop // in this case disable all of the event source, mark the loop // as terminating and wakeup the work thread itself and return // Note: we hold the gate across the entire operation mainly for the // benefit of our event sources so we can disable them cleanly. closeGate(); disableAllEventSources(); is = IOSimpleLockLockDisableInterrupt(workToDoLock); SETP(&fFlags, kLoopTerminate); thread_wakeup_one((void *) &workToDo); IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); openGate(); } else /* !workThread */ { IOEventSource *event, *next; for (event = eventChain; event; event = next) { next = event->getNext(); event->setWorkLoop(0); event->setNext(0); event->release(); } eventChain = 0; // Either we have a partial initialization to clean up // or the workThread itself is performing hari-kari. // Either way clean up all of our resources and return. if (controlG) { controlG->release(); controlG = 0; } if (workToDoLock) { IOSimpleLockFree(workToDoLock); workToDoLock = 0; } if (gateLock) { IORecursiveLockFree(gateLock); gateLock = 0; } if (reserved) { IODelete(reserved, ExpansionData, 1); reserved = 0; } super::free(); } }
void IOFWSyncer::privateSignal() { if (threadMustStop) { IOInterruptState is = IOSimpleLockLockDisableInterrupt(guardLock); threadMustStop = false; thread_wakeup_one((void *) &threadMustStop); IOSimpleLockUnlockEnableInterrupt(guardLock, is); } }
// Internal APIs used by event sources to control the thread void IOWorkLoop::signalWorkAvailable() { if (workToDoLock) { IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock); workToDo = true; thread_wakeup_one((void *) &workToDo); IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); } }
/*
 * Routine: 	lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
lck_mtx_unlock_wakeup (
	lck_mtx_t			*lck,
	thread_t			holder)
{
	thread_t		thread = current_thread();
	lck_mtx_t		*mutex;

	/* Indirect locks keep their real state in a separate lck_mtx. */
	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	/* Only the current holder may perform the contended unlock. */
	if (thread != holder)
		panic("lck_mtx_unlock_wakeup: mutex %p holder %p\n", mutex, holder);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, (int)lck, (int)holder, 0, 0, 0);

	assert(mutex->lck_mtx_waiters > 0);
	/*
	 * Wake one waiter.  The event is the address of the last
	 * unsigned int within the lck_mtx_t — presumably the same event
	 * the blocking path sleeps on (matches the LCK_MTX_EVENT macro
	 * used by newer variants of this code).
	 */
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	/*
	 * If this thread was priority-promoted while holding mutexes,
	 * drop one promotion; on the last one, demote back to a normal
	 * (or depressed) priority.
	 */
	if (thread->promotions > 0) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (	--thread->promotions == 0 &&
				(thread->sched_flags & TH_SFLAG_PROMOTED)		) {
			thread->sched_flags &= ~TH_SFLAG_PROMOTED;

			if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
				/* Thread still has a RW lock promotion */
			}
			else
			if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
				/* Thread is in a priority-depression window. */
				KERNEL_DEBUG_CONSTANT(
					MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE, thread->sched_pri, DEPRESSPRI, 0, lck, 0);

				set_sched_pri(thread, DEPRESSPRI);
			}
			else {
				if (thread->priority < thread->sched_pri) {
					KERNEL_DEBUG_CONSTANT(
						MACHDBG_CODE(DBG_MACH_SCHED,MACH_DEMOTE) | DBG_FUNC_NONE, thread->sched_pri, thread->priority, 0, lck, 0);
				}

				/* Recompute the thread's scheduled priority. */
				SCHED(compute_priority)(thread, FALSE);
			}
		}
		thread_unlock(thread);
		splx(s);
	}

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with thread mutex held.
 */
void
thread_hold(
	register thread_t	thread)
{
	/* Only the 0 -> 1 transition does any work. */
	if (thread->suspend_count++ != 0)
		return;

	install_special_handler(thread);

	/* Wake a started thread so it can notice the suspension. */
	if (thread->started)
		thread_wakeup_one(&thread->suspend_count);
}
/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Called with thread mutex held.
 */
void
thread_release(
	register thread_t	thread)
{
	/* Nothing to release. */
	if (!(thread->suspend_count > 0))
		return;

	/* Still suspended after this release. */
	if (--thread->suspend_count != 0)
		return;

	if (thread->started)
		thread_wakeup_one(&thread->suspend_count);
	else {
		/* Never ran: start it now. */
		thread_start_internal(thread);
	}
}
/*
 * Suspend execution of the specified thread.
 * This is a recursive-style suspension of the thread, a count of
 * suspends is maintained.
 *
 * Called with act_lock held.
 */
void
thread_hold(
	register thread_act_t	act)
{
	thread_t	thread = act->thread;

	/* Only the 0 -> 1 transition does any work. */
	if (act->suspend_count++ != 0)
		return;

	install_special_handler(act);

	/* Wake the thread only if it is started and this act is on top. */
	if (act->started && thread != THREAD_NULL && thread->top_act == act)
		thread_wakeup_one(&act->suspend_count);
}
/*
 * Routine:	lck_mtx_unlockspin_wakeup
 *
 * Wake a single thread blocked on the mutex's wait event.
 * A waiter must exist when this is called.
 */
void
lck_mtx_unlockspin_wakeup (
	lck_mtx_t			*lck)
{
	assert(lck->lck_mtx_waiters > 0);
	thread_wakeup_one(LCK_MTX_EVENT(lck));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(lck), 0, 0, 1, 0);
#if CONFIG_DTRACE
	/*
	 * When there are waiters, we skip the hot-patch spot in the
	 * fastpath, so we record it here.
	 */
	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, lck, 0);
#endif
}
/*
 * Routine:	lck_mtx_unlockspin_wakeup
 *
 * Wake a single thread blocked on the mutex.  The wait event is the
 * address of the last unsigned int within the lck_mtx_t (computed
 * inline here; newer variants use an LCK_MTX_EVENT macro for the same
 * address).  A waiter must exist when this is called.
 */
void
lck_mtx_unlockspin_wakeup (
	lck_mtx_t			*lck)
{
	assert(lck->lck_mtx_waiters > 0);
	thread_wakeup_one((event_t)(((unsigned int*)lck)+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int)));

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_NONE, (int)lck, 0, 0, 1, 0);
#if CONFIG_DTRACE
	/*
	 * When there are waiters, we skip the hot-patch spot in the
	 * fastpath, so we record it here.
	 */
	LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, lck, 0);
#endif
}
/* * Decrement internal suspension count for thr_act, setting thread * runnable when count falls to zero. * * Called with act_lock held. */ void thread_release( register thread_act_t act) { thread_t thread = act->thread; if ( act->suspend_count > 0 && --act->suspend_count == 0 && thread != THREAD_NULL && thread->top_act == act ) { if (!act->started) { clear_wait(thread, THREAD_AWAKENED); act->started = TRUE; } else thread_wakeup_one(&act->suspend_count); } }
/*
 * Routine: 	lck_mtx_unlock_wakeup
 *
 * Invoked on unlock when there is contention.
 *
 * Called with the interlock locked.
 */
void
lck_mtx_unlock_wakeup (
	lck_mtx_t			*lck,
	thread_t			holder)
{
	thread_t		thread = current_thread();
	lck_mtx_t		*mutex;
	__kdebug_only uintptr_t	trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck);

	/* Indirect locks keep their real state in a separate lck_mtx. */
	if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)
		mutex = lck;
	else
		mutex = &lck->lck_mtx_ptr->lck_mtx;

	/* Only the current holder may perform the contended unlock. */
	if (thread != holder)
		panic("lck_mtx_unlock_wakeup: mutex %p holder %p\n", mutex, holder);

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(holder), 0, 0, 0);

	assert(mutex->lck_mtx_waiters > 0);

	/*
	 * With multiple waiters, hand the stored mutex priority along so
	 * the remaining waiters keep their boost; a lone waiter gets a
	 * plain wakeup.
	 */
	if (mutex->lck_mtx_waiters > 1)
		thread_wakeup_one_with_pri(LCK_MTX_EVENT(lck), lck->lck_mtx_pri);
	else
		thread_wakeup_one(LCK_MTX_EVENT(lck));

	/* Drop one promotion; clearing state on the last one. */
	if (thread->promotions > 0) {
		spl_t		s = splsched();

		thread_lock(thread);
		if (--thread->promotions == 0 && (thread->sched_flags & TH_SFLAG_PROMOTED))
			lck_mtx_clear_promoted(thread, trace_lck);
		thread_unlock(thread);
		splx(s);
	}

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
}
/*
 *	thread_resume:
 *
 *	Undo one user-level suspension of an activation.  When both the
 *	user stop count and the overall suspend count drop to zero — and
 *	the act is the top activation of a live shuttle — the thread is
 *	made runnable again.
 *
 *	Returns:
 *		KERN_SUCCESS		a user stop was removed
 *		KERN_FAILURE		the act was not user-suspended
 *		KERN_TERMINATED		the act is no longer active
 *		KERN_INVALID_ARGUMENT	NULL act or kernel-task act
 */
kern_return_t
thread_resume(
	register thread_act_t	act)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t		thread;

	if (act == THR_ACT_NULL || act->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (act->active) {
		if (act->user_stop_count > 0) {
			/*
			 * Short-circuit order matters: suspend_count is only
			 * decremented once user_stop_count reaches zero, and
			 * the thread checks never affect the counts.
			 */
			if (	--act->user_stop_count == 0 &&
					--act->suspend_count == 0 &&
					thread != THREAD_NULL &&
					thread->top_act == act ) {
				if (!act->started) {
					/* First run: kick the thread out of its initial wait. */
					clear_wait(thread, THREAD_AWAKENED);
					act->started = TRUE;
				}
				else
					thread_wakeup_one(&act->suspend_count);
			}
		}
		else
			result = KERN_FAILURE;		/* not user-suspended */
	}
	else
		result = KERN_TERMINATED;

	act_unlock_thread(act);

	return (result);
}
/*
 *	thread_suspend:
 *
 *	Apply one user-level suspension to an activation.  On the first
 *	user stop a special handler is installed; if the target is another
 *	(started, top-activation) thread, it is woken and then waited on
 *	so it has stopped by the time this returns.
 *
 *	Returns:
 *		KERN_SUCCESS		the stop count was incremented
 *		KERN_TERMINATED		the act is no longer active
 *		KERN_INVALID_ARGUMENT	NULL act or kernel-task act
 */
kern_return_t
thread_suspend(
	register thread_act_t	act)
{
	thread_t	thread;

	if (act == THR_ACT_NULL || act->task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(act);

	if (!act->active) {
		act_unlock_thread(act);
		return (KERN_TERMINATED);
	}

	/* Only the 0 -> 1 transition installs the handler and stops the target. */
	if (	act->user_stop_count++ == 0 &&
			act->suspend_count++ == 0 ) {
		install_special_handler(act);
		if (	thread != current_thread() &&
				thread != THREAD_NULL &&
				thread->top_act == act ) {
			assert(act->started);
			thread_wakeup_one(&act->suspend_count);
			/* Must drop the act lock before blocking in thread_wait. */
			act_unlock_thread(act);
			thread_wait(thread);
		}
		else
			act_unlock_thread(act);
	}
	else
		act_unlock_thread(act);

	return (KERN_SUCCESS);
}
void DldIODataQueue::sendDataAvailableNotificationOneThread() { thread_wakeup_one( this->dataQueue ); }