/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);

		/* capture the successor now: a selected thread is unlinked */
		wqe_next = (wait_queue_element_t)
			       queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {
			/*
			 * Otherwise, its a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and return it locked.
			 *
			 * Use a distinct local here: reusing the outer `t`
			 * would leave a stale non-NULL value behind for the
			 * post-recursion check on a later iteration.
			 */
			thread_t candidate = (thread_t)wq_element;

			if (candidate->wait_event == event) {
				thread_lock(candidate);
				remqueue(q, (queue_entry_t) candidate);
				candidate->wait_queue = WAIT_QUEUE_NULL;
				candidate->wait_event = NO_EVENT64;
				candidate->at_safe_point = FALSE;
				return candidate;	/* still locked */
			}
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
/*
 *	Routine:	wait_queue_member_locked
 *	Purpose:
 *		Indicate if this set queue is a member of the queue
 *	Conditions:
 *		The wait queue is locked
 *		The set queue is just that, a set queue
 */
static boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t elem;
	queue_t q;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	/* Scan the queue's element list for a link pointing at wq_set. */
	q = &wq->wq_queue;
	for (elem = (wait_queue_element_t) queue_first(q);
	     !queue_end(q, (queue_entry_t)elem);
	     elem = (wait_queue_element_t) queue_next((queue_t)elem)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, elem);
		if (elem->wqe_type == WAIT_QUEUE_LINK ||
		    elem->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			if (((wait_queue_link_t)elem)->wql_setqueue == wq_set)
				return TRUE;
		}
	}
	return FALSE;
}
/*
 *	Routine:	wait_queue_link_internal
 *	Purpose:
 *		Insert a set wait queue into a wait queue, linking the two
 *		together through the caller-provided wait_queue_link
 *		structure.
 *	Conditions:
 *		The wait queue being inserted must be inited as a set queue
 *		The wait_queue_link structure must already be properly typed
 */
static kern_return_t
wait_queue_link_internal(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t elem;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	/*
	 * There are probably fewer threads and sets associated with
	 * the wait queue than there are wait queues associated with
	 * the set.  So walk the wait queue's membership list to
	 * reject a duplicate link to the same set.
	 */
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	for (elem = (wait_queue_element_t) queue_first(q);
	     !queue_end(q, (queue_entry_t)elem);
	     elem = (wait_queue_element_t) queue_next((queue_t)elem)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, elem);
		if ((elem->wqe_type == WAIT_QUEUE_LINK ||
		     elem->wqe_type == WAIT_QUEUE_LINK_NOALLOC) &&
		    ((wait_queue_link_t)elem)->wql_setqueue == wq_set) {
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
	}

	/*
	 * Not already a member: thread the link onto both the wait
	 * queue and the set (lock order: wq then wq_set).
	 */
	wqs_lock(wq_set);

	WAIT_QUEUE_SET_CHECK(wq_set);

	assert(wql->wql_type == WAIT_QUEUE_LINK ||
	       wql->wql_type == WAIT_QUEUE_LINK_NOALLOC);

	wql->wql_queue = wq;
	wql_clear_prepost(wql);
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all the sets
 *		it is a member of.  Zone-allocated link structures
 *		(WAIT_QUEUE_LINK) are freed; caller-provided ones
 *		(WAIT_QUEUE_LINK_NOALLOC) are only unlinked.
 *	Conditions:
 *		Nothing of interest locked on entry.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;	/* collects links to free later */
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(links);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		boolean_t alloced;

		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		/* capture successor first: unlinking removes this element */
		wq_next_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);

		/* only WAIT_QUEUE_LINK elements came from the link zone */
		alloced = (wq_element->wqe_type == WAIT_QUEUE_LINK);
		if (alloced || wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			/* lock order: wq already held, then the set */
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			/* defer the zfree until all locks are dropped */
			if (alloced)
				enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	/* now safe to free the zone-allocated links */
	while(!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		zfree(_wait_queue_link_zone, wql);
	}

	return(KERN_SUCCESS);
}
/*
 *	Routine:	_wait_queue_select64_thread
 *	Purpose:
 *		Look for a thread and remove it from the queues, if
 *		(and only if) the thread is waiting on the supplied
 *		<wait_queue, event> pair.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		KERN_NOT_WAITING: Thread is not waiting here.
 *		KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	/* Fast path: the thread is parked directly on this queue. */
	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue((queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* thread still locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/*
	 * The wait_queue associated with the thread may be one of this
	 * wait queue's sets.  Go see.  If so, removing it from
	 * there is like removing it from here.
	 */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			       queue_next((queue_t) wq_element);

		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			/* recurse into the set's embedded wait queue */
			wqs_lock(set_queue);
			if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
				res = _wait_queue_select64_thread(&set_queue->wqs_wait_queue,
								event,
								thread);
			}
			wqs_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;	/* thread locked by recursion */
		}
		wq_element = wqe_next;
	}
	return res;
}
/*
 *	Routine:	wait_queue_unlink_all
 *	Purpose:
 *		Remove the linkage between a wait queue and all the
 *		sets it belongs to, freeing each link structure.
 *	Conditions:
 *		Nothing of interest locked on entry.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t elem;
	wait_queue_element_t next;
	wait_queue_set_t set;
	wait_queue_link_t wql;
	queue_head_t free_head;
	queue_t free_list = &free_head;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	queue_init(free_list);

	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	elem = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)elem)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, elem);
		/* capture the successor first: unlinking removes elem */
		next = (wait_queue_element_t) queue_next((queue_t)elem);

		if (elem->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)elem;
			set = wql->wql_setqueue;
			wqs_lock(set);
			wait_queue_unlink_locked(wq, set, wql);
			wqs_unlock(set);
			/* defer the free until every lock is dropped */
			enqueue(free_list, &wql->wql_links);
		}
		elem = next;
	}
	wait_queue_unlock(wq);
	splx(s);

	while (!queue_empty(free_list)) {
		wql = (wait_queue_link_t) dequeue(free_list);
		kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
	}

	return KERN_SUCCESS;
}
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure if it was dynamically
 *		allocated.
 *	Conditions:
 *		The wait queue must currently be a member of the set.
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {

			wql = (wait_queue_link_t)wq_element;

			if (wql->wql_setqueue == wq_set) {
				boolean_t alloced;

				/* only zone-allocated links get freed below */
				alloced = (wql->wql_type == WAIT_QUEUE_LINK);
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				/* free only after every lock is dropped */
				if (alloced)
					zfree(_wait_queue_link_zone, wql);
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			     queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
/*
 *	Routine:	wait_queue_unlink
 *	Purpose:
 *		Remove the linkage between a wait queue and a set,
 *		freeing the linkage structure.
 *	Conditions:
 *		The wait queue must currently be a member of the set.
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t elem;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	/* Search the queue's element list for the link to this set. */
	q = &wq->wq_queue;
	for (elem = (wait_queue_element_t) queue_first(q);
	     !queue_end(q, (queue_entry_t)elem);
	     elem = (wait_queue_element_t) queue_next((queue_t)elem)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, elem);
		if (elem->wqe_type != WAIT_QUEUE_LINK)
			continue;
		wql = (wait_queue_link_t)elem;
		if (wql->wql_setqueue != wq_set)
			continue;

		/* found it: unlink under both locks, free after unlocking */
		wqs_lock(wq_set);
		wait_queue_unlink_locked(wq, wq_set, wql);
		wqs_unlock(wq_set);
		wait_queue_unlock(wq);
		splx(s);
		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
		return KERN_SUCCESS;
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
/*
 *	Routine:	_wait_queue_select64_one
 *	Purpose:
 *		Select the best thread off a wait queue that meets the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		possibly recursive
 *	Returns:
 *		a locked thread - if one found
 *	Note:
 *		This is where the sync policy of the wait queue comes
 *		into effect.  For now, we just assume FIFO/LIFO.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		/* capture successor first: a selected thread is unlinked */
		wqe_next = (wait_queue_element_t)
			       queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			/*
			 * We have to check the set wait queue.  If the set
			 * supports pre-posting, it isn't already preposted,
			 * and we didn't find a thread in the set, then mark it.
			 *
			 * If we later find a thread, there may be a spurious
			 * pre-post here on this set.  The wait side has to check
			 * for that either pre- or post-wait.
			 */
			wqs_lock(set_queue);
			if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
				t = _wait_queue_select64_one(&set_queue->wqs_wait_queue, event);
			}
			if (t != THREAD_NULL) {
				wqs_unlock(set_queue);
				return t;	/* locked by the recursion */
			}
			/* no thread found: pre-post the generic event on the set */
			if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
				queue_t ppq = &set_queue->wqs_preposts;
				queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
			}
			wqs_unlock(set_queue);

		} else {

			/*
			 * Otherwise, its a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			t = (thread_t)wq_element;
			if (t->wait_event == event) {
				thread_lock(t);
				remqueue((queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
				return t;	/* still locked */
			}

			/* reset so the post-recursion check above stays valid */
			t = THREAD_NULL;
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		/* capture successor first: selected threads are unlinked */
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
		    wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t set_queue = wql->wql_setqueue;

			/*
			 * We have to check the set wait queue.  If it is marked
			 * as pre-post, and it is the "generic event" then mark
			 * it pre-posted now (if not already).
			 */
			wqs_lock(set_queue);
			if (event == NO_EVENT64 && set_queue->wqs_prepost && !wql_is_preposted(wql)) {
				queue_t ppq = &set_queue->wqs_preposts;
				queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
			}
			if (! wait_queue_empty(&set_queue->wqs_wait_queue))
				_wait_queue_select64_all(&set_queue->wqs_wait_queue, event, wake_queue);
			wqs_unlock(set_queue);
		} else {

			/*
			 * Otherwise, its a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue((queue_entry_t) t);
				enqueue (wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;

				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Routine:	_wait_queue_select64_all
 *	Purpose:
 *		Select all threads off a wait queue that meet the
 *		supplied criteria.
 *	Conditions:
 *		at splsched
 *		wait queue locked
 *		wake_queue initialized and ready for insertion
 *		possibly recursive
 *	Returns:
 *		a queue of locked threads
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;

	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		/* capture successor first: selected threads are unlinked */
		wqe_next = (wait_queue_element_t)
			   queue_next((queue_t) wq_element);

		/*
		 * We may have to recurse if this is a compound wait queue.
		 */
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/*
			 * We have to check the set wait queue.
			 */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (set_queue->wq_isprepost) {
				wait_queue_set_t wqs = (wait_queue_set_t)set_queue;

				/*
				 * Preposting is only for sets and wait queue
				 * is the first element of set
				 */
				wqs->wqs_refcount++;
			}
			if (! wait_queue_empty(set_queue))
				_wait_queue_select64_all(set_queue, event, wake_queue);
			wait_queue_unlock(set_queue);
		} else {

			/*
			 * Otherwise, its a thread.  If it is waiting on
			 * the event we are posting to this queue, pull
			 * it off the queue and stick it in our wake_queue.
			 */
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue (wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;

				/* returned locked */
			}
		}
		wq_element = wqe_next;
	}
}
/* * Routine: wait_queue_peek64_locked * Purpose: * Select the best thread from a wait queue that meet the * supplied criteria, but leave it on the queue it was * found on. The thread, and the actual wait_queue the * thread was found on are identified. * Conditions: * at splsched * wait queue locked * possibly recursive * Returns: * a locked thread - if one found * a locked waitq - the one the thread was found on * Note: * Both the waitq the thread was actually found on, and * the supplied wait queue, are locked after this. */ __private_extern__ void wait_queue_peek64_locked( wait_queue_t wq, event64_t event, thread_t *tp, wait_queue_t *wqp) { wait_queue_element_t wq_element; wait_queue_element_t wqe_next; thread_t t; queue_t q; assert(wq->wq_fifo); *tp = THREAD_NULL; q = &wq->wq_queue; wq_element = (wait_queue_element_t) queue_first(q); while (!queue_end(q, (queue_entry_t)wq_element)) { WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element); wqe_next = (wait_queue_element_t) queue_next((queue_t) wq_element); /* * We may have to recurse if this is a compound wait queue. */ if (wq_element->wqe_type == WAIT_QUEUE_LINK) { wait_queue_link_t wql = (wait_queue_link_t)wq_element; wait_queue_t set_queue; /* * We have to check the set wait queue. */ set_queue = (wait_queue_t)wql->wql_setqueue; wait_queue_lock(set_queue); if (! wait_queue_empty(set_queue)) { wait_queue_peek64_locked(set_queue, event, tp, wqp); } if (*tp != THREAD_NULL) { if (*wqp != set_queue) wait_queue_unlock(set_queue); return; /* thread and its waitq locked */ } wait_queue_unlock(set_queue); } else { /* * Otherwise, its a thread. If it is waiting on * the event we are posting to this queue, return * it locked, but leave it on the queue. */ thread_t t = (thread_t)wq_element; if (t->wait_event == event) { thread_lock(t); *tp = t; *wqp = wq; return; } } wq_element = wqe_next; } }