/*
 * Routine: wait_queue_link_internal
 * Purpose:
 *     Insert a set wait queue into a wait queue.  This
 *     requires us to link the two together using a wait_queue_link
 *     structure that was provided.
 * Conditions:
 *     The wait queue being inserted must be inited as a set queue.
 *     The wait_queue_link structure must already be properly typed.
 */
static kern_return_t
wait_queue_link_internal(
    wait_queue_t      wq,
    wait_queue_set_t  wq_set,
    wait_queue_link_t wql)
{
    wait_queue_element_t wq_element;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set))
        return KERN_INVALID_ARGUMENT;

    /*
     * There are probably fewer threads and sets associated with
     * the wait queue than there are wait queues associated with
     * the set.  So let's validate it that way.
     */
    s = splsched();
    wait_queue_lock(wq);

    q = &wq->wq_queue;
    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        if ((wq_element->wqe_type == WAIT_QUEUE_LINK ||
             wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) &&
            ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
            wait_queue_unlock(wq);
            splx(s);
            return KERN_ALREADY_IN_SET;
        }
        wq_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);
    }

    /*
     * Not already a member, so we can add it.
     */
    wqs_lock(wq_set);

    WAIT_QUEUE_SET_CHECK(wq_set);

    assert(wql->wql_type == WAIT_QUEUE_LINK ||
           wql->wql_type == WAIT_QUEUE_LINK_NOALLOC);

    wql->wql_queue = wq;
    wql_clear_prepost(wql);
    queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
    wql->wql_setqueue = wq_set;
    queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);

    wqs_unlock(wq_set);
    wait_queue_unlock(wq);
    splx(s);

    return KERN_SUCCESS;
}
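/*
 * A minimal sketch (not necessarily the canonical implementation) of the
 * public, allocating wrapper one would expect above
 * wait_queue_link_internal(): grab a link from _wait_queue_link_zone (the
 * zone used by the free paths below), type it WAIT_QUEUE_LINK so the unlink
 * paths know to free it, and release it again if the link attempt fails.
 * WAIT_QUEUE_LINK_NULL is assumed to name the zone's failure return.
 */
kern_return_t
wait_queue_link(
    wait_queue_t     wq,
    wait_queue_set_t wq_set)
{
    wait_queue_link_t wql;
    kern_return_t ret;

    wql = (wait_queue_link_t) zalloc(_wait_queue_link_zone);
    if (wql == WAIT_QUEUE_LINK_NULL)
        return KERN_RESOURCE_SHORTAGE;

    wql->wql_type = WAIT_QUEUE_LINK;
    ret = wait_queue_link_internal(wq, wq_set, wql);
    if (ret != KERN_SUCCESS)
        zfree(_wait_queue_link_zone, wql);

    return ret;
}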
/*
 * Routine: wait_queue_unlink_all
 * Purpose:
 *     Remove the linkage between a wait queue and all its sets.
 *     The link structures that were allocated internally are
 *     freed; the no-alloc ones remain the caller's responsibility.
 * Conditions:
 *     Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
    wait_queue_t wq)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wq_next_element;
    wait_queue_set_t wq_set;
    wait_queue_link_t wql;
    queue_head_t links_queue_head;
    queue_t links = &links_queue_head;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_valid(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    queue_init(links);

    s = splsched();
    wait_queue_lock(wq);

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        boolean_t alloced;

        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wq_next_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        alloced = (wq_element->wqe_type == WAIT_QUEUE_LINK);
        if (alloced || wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
            wql = (wait_queue_link_t)wq_element;
            wq_set = wql->wql_setqueue;
            wqs_lock(wq_set);
            wait_queue_unlink_locked(wq, wq_set, wql);
            wqs_unlock(wq_set);
            if (alloced)
                enqueue(links, &wql->wql_links);
        }
        wq_element = wq_next_element;
    }
    wait_queue_unlock(wq);
    splx(s);

    while (!queue_empty(links)) {
        wql = (wait_queue_link_t) dequeue(links);
        zfree(_wait_queue_link_zone, wql);
    }

    return KERN_SUCCESS;
}
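/*
 * Companion sketch for the WAIT_QUEUE_LINK_NOALLOC case handled above
 * (illustrative; treat the exact body as an assumption): the caller supplies
 * the link storage, so it is typed NOALLOC and the unlink paths deliberately
 * skip freeing it.
 */
kern_return_t
wait_queue_link_noalloc(
    wait_queue_t      wq,
    wait_queue_set_t  wq_set,
    wait_queue_link_t wql)
{
    /* Caller-owned storage: never enqueued for zfree() on unlink. */
    wql->wql_type = WAIT_QUEUE_LINK_NOALLOC;
    return wait_queue_link_internal(wq, wq_set, wql);
}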
/*
 * Routine: wait_queue_sub_clearrefs
 * Purpose:
 *     Reset the set's accumulated reference count (wqs_refcount)
 *     to zero.
 * Conditions:
 *     The wait queue must be a set.
 */
kern_return_t
wait_queue_sub_clearrefs(
    wait_queue_set_t wq_set)
{
    if (!wait_queue_is_set(wq_set))
        return KERN_INVALID_ARGUMENT;

    wqs_lock(wq_set);
    wq_set->wqs_refcount = 0;
    wqs_unlock(wq_set);
    return KERN_SUCCESS;
}
/*
 * Routine: wait_queue_set_unlink_all
 * Purpose:
 *     Remove the linkage between a set wait queue and all its
 *     member wait queues.  The link structures are freed for those
 *     links which were dynamically allocated.
 * Conditions:
 *     The wait queue must be a set.
 */
kern_return_t
wait_queue_set_unlink_all(
    wait_queue_set_t wq_set)
{
    wait_queue_link_t wql;
    wait_queue_t wq;
    queue_t q;
    queue_head_t links_queue_head;
    queue_t links = &links_queue_head;
    spl_t s;

    if (!wait_queue_is_set(wq_set)) {
        return KERN_INVALID_ARGUMENT;
    }

    queue_init(links);

retry:
    s = splsched();
    wqs_lock(wq_set);

    q = &wq_set->wqs_setlinks;

    wql = (wait_queue_link_t)queue_first(q);
    while (!queue_end(q, (queue_entry_t)wql)) {
        WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
        wq = wql->wql_queue;
        if (wait_queue_lock_try(wq)) {
            boolean_t alloced;

            alloced = (wql->wql_type == WAIT_QUEUE_LINK);
            wait_queue_unlink_locked(wq, wq_set, wql);
            wait_queue_unlock(wq);
            if (alloced)
                enqueue(links, &wql->wql_links);
            wql = (wait_queue_link_t)queue_first(q);
        } else {
            wqs_unlock(wq_set);
            splx(s);
            delay(1);
            goto retry;
        }
    }
    wqs_unlock(wq_set);
    splx(s);

    while (!queue_empty(links)) {
        wql = (wait_queue_link_t) dequeue(links);
        zfree(_wait_queue_link_zone, wql);
    }
    return KERN_SUCCESS;
}
/*
 * Routine: _wait_queue_select64_thread
 * Purpose:
 *     Look for a thread and remove it from the queues, if
 *     (and only if) the thread is waiting on the supplied
 *     <wait_queue, event> pair.
 * Conditions:
 *     at splsched
 *     wait queue locked
 *     possibly recursive
 * Returns:
 *     KERN_NOT_WAITING: Thread is not waiting here.
 *     KERN_SUCCESS: It was, and is now removed (returned locked)
 */
static kern_return_t
_wait_queue_select64_thread(
    wait_queue_t wq,
    event64_t    event,
    thread_t     thread)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wqe_next;
    kern_return_t res = KERN_NOT_WAITING;
    queue_t q = &wq->wq_queue;

    thread_lock(thread);
    if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
        remqueue((queue_entry_t) thread);
        thread->at_safe_point = FALSE;
        thread->wait_event = NO_EVENT64;
        thread->wait_queue = WAIT_QUEUE_NULL;
        /* thread still locked */
        return KERN_SUCCESS;
    }
    thread_unlock(thread);

    /*
     * The wait_queue associated with the thread may be one of this
     * wait queue's sets.  Go see.  If so, removing it from
     * there is like removing it from here.
     */
    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wqe_next = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
            wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_set_t set_queue = wql->wql_setqueue;

            wqs_lock(set_queue);
            if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
                res = _wait_queue_select64_thread(&set_queue->wqs_wait_queue,
                                                  event,
                                                  thread);
            }
            wqs_unlock(set_queue);
            if (res == KERN_SUCCESS)
                return KERN_SUCCESS;
        }
        wq_element = wqe_next;
    }
    return res;
}
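/*
 * Sketch of the expected caller pattern for _wait_queue_select64_thread()
 * (modeled on the usual wakeup entry points; the exact body is an
 * assumption, not the verified implementation): take splsched and the queue
 * lock, let the selector pull the thread (returned locked on KERN_SUCCESS),
 * then hand it to the scheduler with thread_go() and drop the thread lock.
 */
kern_return_t
wait_queue_wakeup64_thread(
    wait_queue_t  wq,
    event64_t     event,
    thread_t      thread,
    wait_result_t result)
{
    kern_return_t res;
    spl_t s;

    if (!wait_queue_is_valid(wq))
        return KERN_INVALID_ARGUMENT;

    s = splsched();
    wait_queue_lock(wq);
    res = _wait_queue_select64_thread(wq, event, thread);
    wait_queue_unlock(wq);

    if (res == KERN_SUCCESS) {
        res = thread_go(thread, result);    /* thread came back locked */
        thread_unlock(thread);
    }
    splx(s);
    return res;
}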
/*
 * Routine: wait_queue_unlink_all
 * Purpose:
 *     Remove the linkage between a wait queue and all its sets,
 *     freeing all the link structures.
 * Conditions:
 *     Nothing of interest locked.
 */
kern_return_t
wait_queue_unlink_all(
    wait_queue_t wq)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wq_next_element;
    wait_queue_set_t wq_set;
    wait_queue_link_t wql;
    queue_head_t links_queue_head;
    queue_t links = &links_queue_head;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_queue(wq)) {
        return KERN_INVALID_ARGUMENT;
    }

    queue_init(links);

    s = splsched();
    wait_queue_lock(wq);

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wq_next_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
            wql = (wait_queue_link_t)wq_element;
            wq_set = wql->wql_setqueue;
            wqs_lock(wq_set);
            wait_queue_unlink_locked(wq, wq_set, wql);
            wqs_unlock(wq_set);
            enqueue(links, &wql->wql_links);
        }
        wq_element = wq_next_element;
    }
    wait_queue_unlock(wq);
    splx(s);

    while (!queue_empty(links)) {
        wql = (wait_queue_link_t) dequeue(links);
        kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
    }

    return KERN_SUCCESS;
}
/*
 * Routine: wait_queue_set_unlink_all
 * Purpose:
 *     Remove the linkage between a set wait queue and all its
 *     member wait queues.  The link structures are freed.
 * Conditions:
 *     The wait queue must be a set.
 */
kern_return_t
wait_queue_set_unlink_all(
    wait_queue_set_t wq_set)
{
    wait_queue_link_t wql;
    wait_queue_t wq;
    queue_t q;
    queue_head_t links_queue_head;
    queue_t links = &links_queue_head;
    spl_t s;

    if (!wait_queue_is_set(wq_set)) {
        return KERN_INVALID_ARGUMENT;
    }

    queue_init(links);

retry:
    s = splsched();
    wqs_lock(wq_set);

    q = &wq_set->wqs_setlinks;

    wql = (wait_queue_link_t)queue_first(q);
    while (!queue_end(q, (queue_entry_t)wql)) {
        WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
        wq = wql->wql_queue;
        if (wait_queue_lock_try(wq)) {
            wait_queue_unlink_locked(wq, wq_set, wql);
            wait_queue_unlock(wq);
            enqueue(links, &wql->wql_links);
            wql = (wait_queue_link_t)queue_first(q);
        } else {
            wqs_unlock(wq_set);
            splx(s);
            delay(1);
            goto retry;
        }
    }
    wqs_unlock(wq_set);
    splx(s);

    while (!queue_empty(links)) {
        wql = (wait_queue_link_t) dequeue(links);
        kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
    }
    return KERN_SUCCESS;
}
/*
 * Routine: wait_queue_unlink
 * Purpose:
 *     Remove the linkage between a wait queue and a set,
 *     freeing the linkage structure if it was dynamically
 *     allocated.
 * Conditions:
 *     The wait queue must be a member of the set queue.
 */
kern_return_t
wait_queue_unlink(
    wait_queue_t     wq,
    wait_queue_set_t wq_set)
{
    wait_queue_element_t wq_element;
    wait_queue_link_t wql;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_valid(wq) || !wait_queue_is_set(wq_set)) {
        return KERN_INVALID_ARGUMENT;
    }
    s = splsched();
    wait_queue_lock(wq);

    q = &wq->wq_queue;
    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
            wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
            wql = (wait_queue_link_t)wq_element;

            if (wql->wql_setqueue == wq_set) {
                boolean_t alloced;

                alloced = (wql->wql_type == WAIT_QUEUE_LINK);
                wqs_lock(wq_set);
                wait_queue_unlink_locked(wq, wq_set, wql);
                wqs_unlock(wq_set);
                wait_queue_unlock(wq);
                splx(s);
                if (alloced)
                    zfree(_wait_queue_link_zone, wql);
                return KERN_SUCCESS;
            }
        }
        wq_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);
    }
    wait_queue_unlock(wq);
    splx(s);
    return KERN_NOT_IN_SET;
}
/*
 * Routine: wait_queue_set_unlink_one
 * Purpose:
 *     Remove the linkage between a set wait queue and one of its
 *     member wait queues, given the link structure.  The link is
 *     left for the caller to free; if it has already been unlinked
 *     (e.g. by selclearthread()), this is a no-op.
 * Conditions:
 *     The wait queue must be a set.  Nothing of interest locked.
 */
kern_return_t
wait_queue_set_unlink_one(
    wait_queue_set_t  wq_set,
    wait_queue_link_t wql)
{
    wait_queue_t wq;
    spl_t s;

    assert(wait_queue_is_set(wq_set));

retry:
    s = splsched();
    wqs_lock(wq_set);

    WAIT_QUEUE_SET_CHECK(wq_set);

    /* Already unlinked, e.g. by selclearthread() */
    if (wql->wql_type == WAIT_QUEUE_UNLINKED) {
        goto out;
    }

    WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);

    /* On a wait queue, and we hold set queue lock ... */
    wq = wql->wql_queue;
    if (wait_queue_lock_try(wq)) {
        wait_queue_unlink_locked(wq, wq_set, wql);
        wait_queue_unlock(wq);
    } else {
        wqs_unlock(wq_set);
        splx(s);
        delay(1);
        goto retry;
    }

out:
    wqs_unlock(wq_set);
    splx(s);

    return KERN_SUCCESS;
}
/*
 * Routine: wait_queue_set_unlink_all_nofree
 * Purpose:
 *     Remove the linkage between a set wait queue and all its
 *     member wait queues.  The link structures are not freed, nor
 *     returned.  It is the caller's responsibility to track and free
 *     them.
 * Conditions:
 *     The wait queue must be a set.
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
    wait_queue_set_t wq_set)
{
    wait_queue_link_t wql;
    wait_queue_t wq;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_set(wq_set)) {
        return KERN_INVALID_ARGUMENT;
    }

retry:
    s = splsched();
    wqs_lock(wq_set);

    q = &wq_set->wqs_setlinks;

    wql = (wait_queue_link_t)queue_first(q);
    while (!queue_end(q, (queue_entry_t)wql)) {
        WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
        wq = wql->wql_queue;
        if (wait_queue_lock_try(wq)) {
            wait_queue_unlink_locked(wq, wq_set, wql);
            wait_queue_unlock(wq);
            wql = (wait_queue_link_t)queue_first(q);
        } else {
            wqs_unlock(wq_set);
            splx(s);
            delay(1);
            goto retry;
        }
    }
    wqs_unlock(wq_set);
    splx(s);

    return KERN_SUCCESS;
}
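/*
 * Hypothetical caller sketch for the _nofree variant (all names here are
 * illustrative, not from this file): the links live in caller-owned storage,
 * e.g. embedded in a larger object, so after unlinking nothing needs to come
 * back from the wait-queue layer -- the storage simply dies with the object.
 */
struct my_container {                        /* hypothetical container */
    struct wait_queue_set  mc_wqset;
    struct wait_queue_link mc_links[4];      /* linked via a no-alloc link call */
};

static void
my_container_destroy(struct my_container *mc)
{
    /* Detach every member; the link storage stays inside *mc. */
    (void) wait_queue_set_unlink_all_nofree(&mc->mc_wqset);
    /* mc and its embedded links can now be released as one allocation. */
}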
/*
 * Routine: wait_queue_unlink
 * Purpose:
 *     Remove the linkage between a wait queue and a set,
 *     freeing the linkage structure.
 * Conditions:
 *     The wait queue must be a member of the set queue.
 */
kern_return_t
wait_queue_unlink(
    wait_queue_t     wq,
    wait_queue_set_t wq_set)
{
    wait_queue_element_t wq_element;
    wait_queue_link_t wql;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
        return KERN_INVALID_ARGUMENT;
    }
    s = splsched();
    wait_queue_lock(wq);

    q = &wq->wq_queue;
    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
            wql = (wait_queue_link_t)wq_element;

            if (wql->wql_setqueue == wq_set) {
                wqs_lock(wq_set);
                wait_queue_unlink_locked(wq, wq_set, wql);
                wqs_unlock(wq_set);
                wait_queue_unlock(wq);
                splx(s);
                kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
                return KERN_SUCCESS;
            }
        }
        wq_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);
    }
    wait_queue_unlock(wq);
    splx(s);
    return KERN_NOT_IN_SET;
}
/*
 * Routine: wait_queue_sub_clearrefs
 * Purpose:
 *     Drain the prepost queue of a set wait queue; each link
 *     removed is thereby no longer marked preposted.
 * Conditions:
 *     The wait queue must be a set.
 */
kern_return_t
wait_queue_sub_clearrefs(
    wait_queue_set_t wq_set)
{
    wait_queue_link_t wql;
    queue_t q;
    spl_t s;

    if (!wait_queue_is_set(wq_set))
        return KERN_INVALID_ARGUMENT;

    s = splsched();
    wqs_lock(wq_set);
    q = &wq_set->wqs_preposts;
    while (!queue_empty(q)) {
        queue_remove_first(q, wql, wait_queue_link_t, wql_preposts);
        assert(!wql_is_preposted(wql));
    }
    wqs_unlock(wq_set);
    splx(s);
    return KERN_SUCCESS;
}
/*
 * Routine: wait_queue_unlink_one
 * Purpose:
 *     Find and unlink one set wait queue.
 * Conditions:
 *     Nothing of interest locked.
 */
void
wait_queue_unlink_one(
    wait_queue_t      wq,
    wait_queue_set_t *wq_setp)
{
    wait_queue_element_t wq_element;
    queue_t q;
    spl_t s;

    s = splsched();
    wait_queue_lock(wq);

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {

        if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_set_t wq_set = wql->wql_setqueue;

            wqs_lock(wq_set);
            wait_queue_unlink_locked(wq, wq_set, wql);
            wqs_unlock(wq_set);
            wait_queue_unlock(wq);
            splx(s);
            kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
            *wq_setp = wq_set;
            return;
        }

        wq_element = (wait_queue_element_t)
            queue_next((queue_t) wq_element);
    }
    wait_queue_unlock(wq);
    splx(s);
    *wq_setp = WAIT_QUEUE_SET_NULL;
}
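/*
 * Hypothetical usage sketch for wait_queue_unlink_one() (the loop below is
 * illustrative, not from this file): because each call unlinks and frees at
 * most one link, draining every set linkage is just a matter of calling it
 * until it reports WAIT_QUEUE_SET_NULL.
 */
static void
example_unlink_every_set(wait_queue_t wq)
{
    wait_queue_set_t wq_set;

    do {
        wait_queue_unlink_one(wq, &wq_set);
        /* wq_set names the set just unlinked, if any */
    } while (wq_set != WAIT_QUEUE_SET_NULL);
}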
/*
 * Routine: _wait_queue_select64_one
 * Purpose:
 *     Select the best thread off a wait queue that meets the
 *     supplied criteria.
 * Conditions:
 *     at splsched
 *     wait queue locked
 *     possibly recursive
 * Returns:
 *     a locked thread - if one found
 * Note:
 *     This is where the sync policy of the wait queue comes
 *     into effect.  For now, we just assume FIFO/LIFO.
 */
static thread_t
_wait_queue_select64_one(
    wait_queue_t wq,
    event64_t    event)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wqe_next;
    thread_t t = THREAD_NULL;
    queue_t q;

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wqe_next = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        /*
         * We may have to recurse if this is a compound wait queue.
         */
        if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
            wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_set_t set_queue = wql->wql_setqueue;

            /*
             * We have to check the set wait queue.  If the set
             * supports pre-posting, it isn't already preposted,
             * and we didn't find a thread in the set, then mark it.
             *
             * If we later find a thread, there may be a spurious
             * pre-post here on this set.  The wait side has to check
             * for that either pre- or post-wait.
             */
            wqs_lock(set_queue);
            if (! wait_queue_empty(&set_queue->wqs_wait_queue)) {
                t = _wait_queue_select64_one(&set_queue->wqs_wait_queue, event);
            }
            if (t != THREAD_NULL) {
                wqs_unlock(set_queue);
                return t;
            }
            if (event == NO_EVENT64 && set_queue->wqs_prepost &&
                !wql_is_preposted(wql)) {
                queue_t ppq = &set_queue->wqs_preposts;
                queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
            }
            wqs_unlock(set_queue);
        } else {
            /*
             * Otherwise, it's a thread.  If it is waiting on
             * the event we are posting to this queue, pull
             * it off the queue and return it (still locked).
             */
            t = (thread_t)wq_element;
            if (t->wait_event == event) {
                thread_lock(t);
                remqueue((queue_entry_t) t);

                t->wait_queue = WAIT_QUEUE_NULL;
                t->wait_event = NO_EVENT64;
                t->at_safe_point = FALSE;
                return t;   /* still locked */
            }
            t = THREAD_NULL;
        }
        wq_element = wqe_next;
    }
    return THREAD_NULL;
}
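/*
 * Sketch of a typical caller of _wait_queue_select64_one() (modeled on the
 * usual wakeup-one entry point; treat the exact body as an assumption): the
 * selector hands back a locked thread or THREAD_NULL, and the caller makes
 * it runnable with thread_go() before unlocking it.
 */
kern_return_t
wait_queue_wakeup64_one(
    wait_queue_t  wq,
    event64_t     event,
    wait_result_t result)
{
    thread_t thread;
    spl_t s;

    if (!wait_queue_is_valid(wq))
        return KERN_INVALID_ARGUMENT;

    s = splsched();
    wait_queue_lock(wq);
    thread = _wait_queue_select64_one(wq, event);
    wait_queue_unlock(wq);

    if (thread != THREAD_NULL) {
        kern_return_t res = thread_go(thread, result);
        thread_unlock(thread);
        splx(s);
        return res;
    }
    splx(s);
    return KERN_NOT_WAITING;
}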
/*
 * Routine: _wait_queue_select64_all
 * Purpose:
 *     Select all threads off a wait queue that meet the
 *     supplied criteria.
 * Conditions:
 *     at splsched
 *     wait queue locked
 *     wake_queue initialized and ready for insertion
 *     possibly recursive
 * Returns:
 *     a queue of locked threads
 */
static void
_wait_queue_select64_all(
    wait_queue_t wq,
    event64_t    event,
    queue_t      wake_queue)
{
    wait_queue_element_t wq_element;
    wait_queue_element_t wqe_next;
    queue_t q;

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wqe_next = (wait_queue_element_t)
            queue_next((queue_t) wq_element);

        /*
         * We may have to recurse if this is a compound wait queue.
         */
        if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
            wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
            wait_queue_link_t wql = (wait_queue_link_t)wq_element;
            wait_queue_set_t set_queue = wql->wql_setqueue;

            /*
             * We have to check the set wait queue.  If it is marked
             * as pre-post, and it is the "generic event" then mark
             * it pre-posted now (if not already).
             */
            wqs_lock(set_queue);
            if (event == NO_EVENT64 && set_queue->wqs_prepost &&
                !wql_is_preposted(wql)) {
                queue_t ppq = &set_queue->wqs_preposts;
                queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
            }
            if (! wait_queue_empty(&set_queue->wqs_wait_queue))
                _wait_queue_select64_all(&set_queue->wqs_wait_queue,
                                         event,
                                         wake_queue);
            wqs_unlock(set_queue);
        } else {
            /*
             * Otherwise, it's a thread.  If it is waiting on
             * the event we are posting to this queue, pull
             * it off the queue and stick it in our wake_queue.
             */
            thread_t t = (thread_t)wq_element;

            if (t->wait_event == event) {
                thread_lock(t);
                remqueue((queue_entry_t) t);
                enqueue(wake_queue, (queue_entry_t) t);
                t->wait_queue = WAIT_QUEUE_NULL;
                t->wait_event = NO_EVENT64;
                t->at_safe_point = FALSE;
                /* returned locked */
            }
        }
        wq_element = wqe_next;
    }
}
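/*
 * Sketch of a wakeup-all caller built on _wait_queue_select64_all()
 * (illustrative composition, not the verified entry point): collect every
 * matching thread, locked, on a private queue while holding the wait queue
 * lock, then release the queue lock and make each one runnable.
 */
kern_return_t
wait_queue_wakeup64_all(
    wait_queue_t  wq,
    event64_t     event,
    wait_result_t result)
{
    queue_head_t wake_queue_head;
    queue_t q = &wake_queue_head;
    kern_return_t res = KERN_NOT_WAITING;
    spl_t s;

    if (!wait_queue_is_valid(wq))
        return KERN_INVALID_ARGUMENT;

    queue_init(q);

    s = splsched();
    wait_queue_lock(wq);
    _wait_queue_select64_all(wq, event, q);     /* threads return locked */
    wait_queue_unlock(wq);

    while (!queue_empty(q)) {
        thread_t thread = (thread_t) dequeue(q);

        (void) thread_go(thread, result);
        thread_unlock(thread);
        res = KERN_SUCCESS;
    }
    splx(s);

    return res;
}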