/*
 *	Routine:	wait_queue_assert_wait
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible)
{
	spl_t s;
	wait_result_t ret;
	thread_t cur_thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (cur_thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(cur_thread);
	ret = wait_queue_assert_wait64_locked(
			wq, (event64_t)((uint32_t)event),
			interruptible, cur_thread);
	thread_unlock(cur_thread);
	wait_queue_unlock(wq);
	splx(s);
	return (ret);
}
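/*
 * Illustrative sketch (not part of the original file): the typical caller
 * pattern for wait_queue_assert_wait.  The wait is asserted first, and the
 * caller only actually blocks with thread_block() if the assert returned
 * THREAD_WAITING.  "example_wait_for_event" is a hypothetical name used
 * for illustration.
 */
static wait_result_t
example_wait_for_event(wait_queue_t wq, event_t event)
{
	wait_result_t wres;

	wres = wait_queue_assert_wait(wq, event, THREAD_UNINT);
	if (wres == THREAD_WAITING)
		wres = thread_block(THREAD_CONTINUE_NULL);	/* actually wait */
	return wres;
}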
/*
 *	Routine:	wait_queue_wakeup64_thread
 *	Purpose:
 *		Wakeup the particular thread that was specified if and only
 *		if it was in this wait queue (or one of its sets' queues)
 *		and waiting on the specified event.
 *
 *		This is much safer than just removing the thread from
 *		whatever wait queue it happens to be on.  For instance, it
 *		may have already been awoken from the wait you intended to
 *		interrupt and waited on something else (like another
 *		semaphore).
 *	Conditions:
 *		nothing of interest locked
 *		we need to assume spl needs to be raised
 *	Returns:
 *		KERN_SUCCESS - the thread was found waiting and awakened
 *		KERN_NOT_WAITING - the thread was not waiting here
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t res;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	res = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (res == KERN_SUCCESS) {
		/* on success, the select call left the thread locked */
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
		splx(s);
		return res;
	}
	splx(s);
	return KERN_NOT_WAITING;
}
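/*
 * Illustrative sketch (hypothetical caller, not in the original file):
 * safely interrupting a specific thread's wait.  If the thread has already
 * been awakened and moved on, this reports KERN_NOT_WAITING rather than
 * disturbing whatever it is doing now.
 */
static void
example_interrupt_waiter(wait_queue_t wq, event64_t event, thread_t thread)
{
	kern_return_t kr;

	kr = wait_queue_wakeup64_thread(wq, event, thread, THREAD_INTERRUPTED);
	if (kr == KERN_NOT_WAITING) {
		/* thread was not (or is no longer) waiting on <wq,event> */
	}
}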
/*
 *	Routine:	wait_queue_link_internal
 *	Purpose:
 *		Insert a set wait queue into a wait queue.  This
 *		requires us to link the two together using a wait_queue_link
 *		structure that was provided.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 *		The wait_queue_link structure must already be properly typed
 */
static kern_return_t
wait_queue_link_internal(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So let's validate it that way.
	 */
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if ((wq_element->wqe_type == WAIT_QUEUE_LINK ||
		     wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}

	/*
	 * Not already a member, so we can add it.
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	assert(wql->wql_type == WAIT_QUEUE_LINK ||
	       wql->wql_type == WAIT_QUEUE_LINK_NOALLOC);

	wql->wql_queue = wq;
	wql_clear_prepost(wql);
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}
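/*
 * Illustrative sketch (assumption, not in the original file): an allocating
 * front end for wait_queue_link_internal.  It takes a link from
 * _wait_queue_link_zone (the zone the unlink paths below free into), types
 * it WAIT_QUEUE_LINK so unlink knows to free it, and releases it again if
 * the link attempt fails.  "example_wait_queue_link" is a hypothetical name.
 */
static kern_return_t
example_wait_queue_link(wait_queue_t wq, wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	kern_return_t ret;

	wql = (wait_queue_link_t) zalloc(_wait_queue_link_zone);
	if (wql == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	wql->wql_type = WAIT_QUEUE_LINK;
	ret = wait_queue_link_internal(wq, wq_set, wql);
	if (ret != KERN_SUCCESS)
		zfree(_wait_queue_link_zone, wql);
	return ret;
}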
/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all the sets
 *		it is currently a member of.  Dynamically allocated links
 *		(WAIT_QUEUE_LINK) are freed; statically provided ones
 *		(WAIT_QUEUE_LINK_NOALLOC) remain the caller's responsibility.
 *	Conditions:
 *		Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		boolean_t alloced;

		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wq_next_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);

		alloced = (wq_element->wqe_type == WAIT_QUEUE_LINK);
		if (alloced || wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			if (alloced)
				enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	/* free the dynamically allocated links outside the locks */
	while (!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		zfree(_wait_queue_link_zone, wql);
	}

	return (KERN_SUCCESS);
}
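/*
 * Illustrative sketch (hypothetical, not in the original file): a teardown
 * path that detaches a wait queue from every set before the queue itself is
 * destroyed, so that no set can still reach it afterwards.
 */
static void
example_wait_queue_teardown(wait_queue_t wq)
{
	kern_return_t kr;

	kr = wait_queue_unlink_all(wq);
	assert(kr == KERN_SUCCESS);
	/* the queue is now unreachable from any set and safe to deinit */
}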
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure if it was dynamically
 *		allocated.
 *	Conditions:
 *		The wait queue must currently be a member of the set queue.
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {

			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				boolean_t alloced;

				alloced = (wql->wql_type == WAIT_QUEUE_LINK);
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				/* only dynamically allocated links are freed */
				if (alloced)
					zfree(_wait_queue_link_zone, wql);
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
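/*
 * Illustrative sketch (hypothetical): removing one queue from one set.
 * KERN_NOT_IN_SET is not necessarily an error; the membership may already
 * have been torn down by another path (e.g. wait_queue_unlink_all above).
 */
static void
example_unlink_from_set(wait_queue_t wq, wait_queue_set_t wq_set)
{
	kern_return_t kr;

	kr = wait_queue_unlink(wq, wq_set);
	if (kr == KERN_NOT_IN_SET) {
		/* already unlinked; nothing to do */
	}
}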
/*
 *	Routine:	wait_queue_wakeup64_all
 *	Purpose:
 *		Wakeup all threads that are in the specified wait queue
 *		and waiting on the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Threads were woken up
 *		KERN_NOT_WAITING - No threads were waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t ret;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	ret = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	/* lock released */
	splx(s);

	return ret;
}
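/*
 * Illustrative sketch (hypothetical): broadcasting an event when a
 * condition that many threads may be waiting on becomes true.  Note that
 * a wakeup with no waiters is simply dropped; it is not remembered.
 */
static void
example_broadcast(wait_queue_t wq, event64_t event)
{
	kern_return_t kr;

	kr = wait_queue_wakeup64_all(wq, event, THREAD_AWAKENED);
	if (kr == KERN_NOT_WAITING) {
		/* no one was waiting on <wq,event>; nothing happened */
	}
}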
/*
 *	Routine:	wait_queue_wakeup_one
 *	Purpose:
 *		Wakeup the most appropriate thread that is in the specified
 *		wait queue for the specified event.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_SUCCESS - Thread was woken up
 *		KERN_NOT_WAITING - No thread was waiting on the <wq,event> pair
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result,
	int priority)
{
	thread_t thread;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	thread = _wait_queue_select64_one(wq, CAST_DOWN(event64_t,event));
	wait_queue_unlock(wq);

	if (thread) {
		kern_return_t res;

		/* promote the thread on wakeup if a higher priority was requested */
		if (thread->sched_pri < priority) {
			if (priority <= MAXPRI) {
				set_sched_pri(thread, priority);

				thread->was_promoted_on_wakeup = 1;
				thread->sched_flags |= TH_SFLAG_PROMOTED;
			}
		}
		res = thread_go(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);

		splx(s);
		return res;
	}

	splx(s);
	return KERN_NOT_WAITING;
}
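/*
 * Illustrative sketch (hypothetical): waking a single waiter without
 * requesting a priority promotion.  Passing a priority below any valid
 * sched_pri (here -1) makes the promotion check above a no-op.
 */
static void
example_wakeup_one(wait_queue_t wq, event_t event)
{
	kern_return_t kr;

	kr = wait_queue_wakeup_one(wq, event, THREAD_AWAKENED, -1);
	if (kr == KERN_NOT_WAITING) {
		/* no thread was waiting on <wq,event> */
	}
}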
/*
 *	Routine:	wait_queue_assert_wait64
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *	Conditions:
 *		nothing of interest locked.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline)
{
	spl_t s;
	wait_result_t ret;
	thread_t thread = current_thread();

	/* If it is an invalid wait queue, you can't wait on it */
	if (!wait_queue_is_valid(wq))
		return (thread->wait_result = THREAD_RESTART);

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(thread);
	ret = wait_queue_assert_wait64_locked(wq, event, interruptible,
					      deadline, thread);
	thread_unlock(thread);
	wait_queue_unlock(wq);
	splx(s);
	return (ret);
}
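/*
 * Illustrative sketch (hypothetical): a timed wait built on
 * wait_queue_assert_wait64.  clock_interval_to_deadline() converts a
 * relative interval into the absolute-time deadline the call expects;
 * a deadline of 0 would mean no timeout.  "example_timed_wait" is a
 * hypothetical name.
 */
static wait_result_t
example_timed_wait(wait_queue_t wq, event64_t event, uint32_t interval_ms)
{
	wait_result_t wres;
	uint64_t deadline;

	clock_interval_to_deadline(interval_ms, NSEC_PER_MSEC, &deadline);
	wres = wait_queue_assert_wait64(wq, event, THREAD_INTERRUPTIBLE, deadline);
	if (wres == THREAD_WAITING)
		wres = thread_block(THREAD_CONTINUE_NULL);
	return wres;	/* THREAD_TIMED_OUT if the deadline expired */
}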