/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
    task_t              parent_task,
    integer_t           priority,
    thread_continue_t   continuation,
    thread_t            *out_thread)
{
    thread_t        new_thread;
    static thread_t first_thread;

    /*
     * Allocate a thread and initialize static fields
     */
    if (first_thread == NULL)
        new_thread = first_thread = current_thread();
    else
        new_thread = (thread_t)zalloc(thread_zone);
    if (new_thread == NULL)
        return (KERN_RESOURCE_SHORTAGE);

    if (new_thread != first_thread)
        *new_thread = thread_template;

#ifdef MACH_BSD
    {
        new_thread->uthread = uthread_alloc(parent_task, new_thread);
        if (new_thread->uthread == NULL) {
            zfree(thread_zone, new_thread);
            return (KERN_RESOURCE_SHORTAGE);
        }
    }
#endif  /* MACH_BSD */

    if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
        {
            void *ut = new_thread->uthread;

            new_thread->uthread = NULL;
            /* cred free may not be necessary */
            uthread_cleanup(parent_task, ut, parent_task->bsd_info);
            uthread_cred_free(ut);
            uthread_zone_free(ut);
        }
#endif  /* MACH_BSD */
        zfree(thread_zone, new_thread);
        return (KERN_FAILURE);
    }

    new_thread->task = parent_task;

    thread_lock_init(new_thread);
    wake_lock_init(new_thread);

    mutex_init(&new_thread->mutex, 0);

    ipc_thread_init(new_thread);
    queue_init(&new_thread->held_ulocks);

    new_thread->continuation = continuation;

    mutex_lock(&tasks_threads_lock);
    task_lock(parent_task);

    if (    !parent_task->active ||
            (parent_task->thread_count >= THREAD_MAX &&
             parent_task != kernel_task)) {
        task_unlock(parent_task);
        mutex_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
        {
            void *ut = new_thread->uthread;

            new_thread->uthread = NULL;
            uthread_cleanup(parent_task, ut, parent_task->bsd_info);
            /* cred free may not be necessary */
            uthread_cred_free(ut);
            uthread_zone_free(ut);
        }
#endif  /* MACH_BSD */
        ipc_thread_disable(new_thread);
        ipc_thread_terminate(new_thread);
        machine_thread_destroy(new_thread);
        zfree(thread_zone, new_thread);
        return (KERN_FAILURE);
    }

    task_reference_internal(parent_task);

    /* Cache the task's map */
    new_thread->map = parent_task->map;

    /* Chain the thread onto the task's list */
    queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
    parent_task->thread_count++;

    /* So terminating threads don't need to take the task lock to decrement */
    hw_atomic_add(&parent_task->active_thread_count, 1);

    queue_enter(&threads, new_thread, thread_t, threads);
    threads_count++;

    timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
    timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

    /* Set the thread's scheduling parameters */
    if (parent_task != kernel_task)
        new_thread->sched_mode |= TH_MODE_TIMESHARE;
    new_thread->max_priority = parent_task->max_priority;
    new_thread->task_priority = parent_task->priority;
    new_thread->priority = (priority < 0)? parent_task->priority: priority;
    if (new_thread->priority > new_thread->max_priority)
        new_thread->priority = new_thread->max_priority;
    new_thread->importance = new_thread->priority - new_thread->task_priority;
    new_thread->sched_stamp = sched_tick;
    new_thread->pri_shift = sched_pri_shift;
    compute_priority(new_thread, FALSE);

    new_thread->active = TRUE;

    *out_thread = new_thread;

    {
        long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

        kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

        KERNEL_DEBUG_CONSTANT(
            TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
            (vm_address_t)new_thread, dbg_arg2, 0, 0, 0);

        kdbg_trace_string(parent_task->bsd_info,
            &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

        KERNEL_DEBUG_CONSTANT(
            TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
            dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
    }

    DTRACE_PROC1(lwp__create, thread_t, *out_thread);

    return (KERN_SUCCESS);
}
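/*
 * Illustrative sketch (not verbatim kernel code): thread_create_internal()
 * returns with the parent task lock and tasks_threads_lock still held, so an
 * exported wrapper is expected to finish setting up the thread and then drop
 * both locks.  The use of thread_bootstrap_return as the continuation and the
 * hold/suspend bookkeeping below are assumptions made for illustration.
 */
kern_return_t
example_thread_create(
    task_t      task,
    thread_t    *new_thread)
{
    kern_return_t   result;
    thread_t        thread;

    if (task == TASK_NULL || task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    result = thread_create_internal(task, -1,
                (thread_continue_t)thread_bootstrap_return, &thread);
    if (result != KERN_SUCCESS)
        return (result);

    /* leave the new thread suspended until an explicit resume */
    thread->user_stop_count = 1;
    thread_hold(thread);
    if (task->suspend_count > 0)
        thread_hold(thread);

    /* thread_create_internal() left these locked on success */
    task_unlock(task);
    mutex_unlock(&tasks_threads_lock);

    *new_thread = thread;

    return (KERN_SUCCESS);
}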
/*
 * processor_request_action - common internals of processor_assign
 *      and processor_shutdown.  If new_pset is null, this is
 *      a shutdown, else it's an assign and caller must donate
 *      a reference.  For assign operations, it returns
 *      an old pset that must be deallocated if it's not NULL.  For
 *      shutdown operations, it always returns PROCESSOR_SET_NULL.
 */
processor_set_t
processor_request_action(
    processor_t     processor,
    processor_set_t new_pset)
{
    register processor_set_t pset, old_next_pset;

    /*
     * Processor must be in a processor set.  Must lock its idle lock to
     * get at processor state.
     */
    pset = processor->processor_set;
    simple_lock(&pset->idle_lock);

    /*
     * If the processor is dispatching, let it finish - it will set its
     * state to running very soon.
     */
    while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING)
        continue;

    /*
     * Now lock the action queue and do the dirty work.
     */
    simple_lock(&action_lock);

    switch (processor->state) {

    case PROCESSOR_IDLE:
        /*
         * Remove from idle queue.
         */
        queue_remove(&pset->idle_queue, processor,
                     processor_t, processor_queue);
        pset->idle_count--;

        /* fall through ... */

    case PROCESSOR_RUNNING:
        /*
         * Put it on the action queue.
         */
        queue_enter(&action_queue, processor,
                    processor_t, processor_queue);

        /* fall through ... */

    case PROCESSOR_ASSIGN:
        /*
         * And ask the action_thread to do the work.
         */
        if (new_pset == PROCESSOR_SET_NULL) {
            processor->state = PROCESSOR_SHUTDOWN;
            old_next_pset = PROCESSOR_SET_NULL;
        } else {
            processor->state = PROCESSOR_ASSIGN;
            old_next_pset = processor->processor_set_next;
            processor->processor_set_next = new_pset;
        }
        break;

    default:
        printf("state: %d\n", processor->state);
        panic("processor_request_action: bad state");
    }

    simple_unlock(&action_lock);
    simple_unlock(&pset->idle_lock);

    thread_wakeup((event_t)&action_queue);

    return (old_next_pset);
}
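/*
 * Illustrative sketch of the consumer side: processor_request_action() only
 * queues the processor on action_queue and wakes whoever sleeps on that event;
 * a dedicated action thread is expected to drain the queue and perform the
 * shutdown or reassignment.  This loop is a simplified assumption modeled on
 * that pattern; example_do_action() is a placeholder for the real work.
 */
void
example_action_loop(void)
{
    processor_t processor;
    spl_t       s;

    for (;;) {
        s = splsched();
        simple_lock(&action_lock);
        while (!queue_empty(&action_queue)) {
            processor = (processor_t) queue_first(&action_queue);
            queue_remove(&action_queue, processor,
                         processor_t, processor_queue);
            simple_unlock(&action_lock);
            splx(s);

            example_do_action(processor);   /* placeholder: assign or shut down */

            s = splsched();
            simple_lock(&action_lock);
        }

        /* nothing queued: sleep on the event the producer wakes */
        assert_wait((event_t)&action_queue, FALSE);
        simple_unlock(&action_lock);
        splx(s);
        thread_block((void (*)(void)) 0);
    }
}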
/*
 * Routine:     _wait_queue_select64_all
 * Purpose:
 *      Select all threads off a wait queue that meet the
 *      supplied criteria.
 * Conditions:
 *      at splsched
 *      wait queue locked
 *      wake_queue initialized and ready for insertion
 *      possibly recursive
 * Returns:
 *      a queue of locked threads
 */
static void
_wait_queue_select64_all(
    wait_queue_t    wq,
    event64_t       event,
    queue_t         wake_queue)
{
    wait_queue_element_t    wq_element;
    wait_queue_element_t    wqe_next;
    queue_t                 q;

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wqe_next = (wait_queue_element_t)
                   queue_next((queue_t) wq_element);

        /*
         * We may have to recurse if this is a compound wait queue.
         */
        if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
            wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
            wait_queue_link_t   wql = (wait_queue_link_t)wq_element;
            wait_queue_set_t    set_queue = wql->wql_setqueue;

            /*
             * We have to check the set wait queue.  If it is marked
             * as pre-post, and it is the "generic event" then mark
             * it pre-posted now (if not already).
             */
            wqs_lock(set_queue);
            if (event == NO_EVENT64 && set_queue->wqs_prepost &&
                !wql_is_preposted(wql)) {
                queue_t ppq = &set_queue->wqs_preposts;
                queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
            }
            if (!wait_queue_empty(&set_queue->wqs_wait_queue))
                _wait_queue_select64_all(&set_queue->wqs_wait_queue,
                                         event, wake_queue);
            wqs_unlock(set_queue);
        } else {
            /*
             * Otherwise, it's a thread.  If it is waiting on
             * the event we are posting to this queue, pull
             * it off the queue and stick it in our wake_queue.
             */
            thread_t t = (thread_t)wq_element;

            if (t->wait_event == event) {
                thread_lock(t);
                remqueue((queue_entry_t) t);
                enqueue(wake_queue, (queue_entry_t) t);
                t->wait_queue = WAIT_QUEUE_NULL;
                t->wait_event = NO_EVENT64;
                t->at_safe_point = FALSE;
                /* returned locked */
            }
        }
        wq_element = wqe_next;
    }
}
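/*
 * Illustrative caller sketch (assumes the conditions documented above:
 * splsched, wait queue locked).  The selector chains each matching thread,
 * already locked, onto the caller's wake_queue; the caller then dequeues and
 * starts them one at a time.  The thread_go() usage here is an assumption
 * about how a wakeup-all primitive would unblock each selected thread.
 */
static void
example_wakeup_all_locked(
    wait_queue_t    wq,
    event64_t       event,
    wait_result_t   result)
{
    queue_head_t    wake_queue_head;
    queue_t         q = &wake_queue_head;

    queue_init(q);
    _wait_queue_select64_all(wq, event, q);

    while (!queue_empty(q)) {
        thread_t thread = (thread_t) dequeue(q);

        thread_go(thread, result);      /* make the (locked) thread runnable */
        thread_unlock(thread);
    }
}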
/*
 * Routine:     _wait_queue_select64_one
 * Purpose:
 *      Select the best thread off a wait queue that meets the
 *      supplied criteria.
 * Conditions:
 *      at splsched
 *      wait queue locked
 *      possibly recursive
 * Returns:
 *      a locked thread - if one found
 * Note:
 *      This is where the sync policy of the wait queue comes
 *      into effect.  For now, we just assume FIFO/LIFO.
 */
static thread_t
_wait_queue_select64_one(
    wait_queue_t    wq,
    event64_t       event)
{
    wait_queue_element_t    wq_element;
    wait_queue_element_t    wqe_next;
    thread_t                t = THREAD_NULL;
    queue_t                 q;

    q = &wq->wq_queue;

    wq_element = (wait_queue_element_t) queue_first(q);
    while (!queue_end(q, (queue_entry_t)wq_element)) {
        WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
        wqe_next = (wait_queue_element_t)
                   queue_next((queue_t) wq_element);

        /*
         * We may have to recurse if this is a compound wait queue.
         */
        if (wq_element->wqe_type == WAIT_QUEUE_LINK ||
            wq_element->wqe_type == WAIT_QUEUE_LINK_NOALLOC) {
            wait_queue_link_t   wql = (wait_queue_link_t)wq_element;
            wait_queue_set_t    set_queue = wql->wql_setqueue;

            /*
             * We have to check the set wait queue.  If the set
             * supports pre-posting, it isn't already preposted,
             * and we didn't find a thread in the set, then mark it.
             *
             * If we later find a thread, there may be a spurious
             * pre-post here on this set.  The wait side has to check
             * for that either pre- or post-wait.
             */
            wqs_lock(set_queue);
            if (!wait_queue_empty(&set_queue->wqs_wait_queue)) {
                t = _wait_queue_select64_one(&set_queue->wqs_wait_queue, event);
            }
            if (t != THREAD_NULL) {
                wqs_unlock(set_queue);
                return t;
            }
            if (event == NO_EVENT64 && set_queue->wqs_prepost &&
                !wql_is_preposted(wql)) {
                queue_t ppq = &set_queue->wqs_preposts;
                queue_enter(ppq, wql, wait_queue_link_t, wql_preposts);
            }
            wqs_unlock(set_queue);
        } else {
            /*
             * Otherwise, it's a thread.  If it is waiting on
             * the event we are posting to this queue, pull
             * it off the queue and return it (still locked).
             */
            t = (thread_t)wq_element;
            if (t->wait_event == event) {
                thread_lock(t);
                remqueue((queue_entry_t) t);

                t->wait_queue = WAIT_QUEUE_NULL;
                t->wait_event = NO_EVENT64;
                t->at_safe_point = FALSE;
                return t;       /* still locked */
            }

            t = THREAD_NULL;
        }
        wq_element = wqe_next;
    }
    return THREAD_NULL;
}
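/*
 * Illustrative caller sketch for the one-thread variant, under the same
 * documented conditions (splsched, wait queue locked).  The selected thread
 * comes back locked; unblocking it with thread_go() is an assumption about
 * the surrounding wakeup-one primitive.
 */
static kern_return_t
example_wakeup_one_locked(
    wait_queue_t    wq,
    event64_t       event,
    wait_result_t   result)
{
    thread_t thread = _wait_queue_select64_one(wq, event);

    if (thread == THREAD_NULL)
        return (KERN_NOT_WAITING);

    thread_go(thread, result);
    thread_unlock(thread);
    return (KERN_SUCCESS);
}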
/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
    task_t              parent_task,
    integer_t           priority,
    thread_continue_t   continuation,
    int                 options,
#define TH_OPTION_NONE      0x00
#define TH_OPTION_NOCRED    0x01
#define TH_OPTION_NOSUSP    0x02
    thread_t            *out_thread)
{
    thread_t        new_thread;
    static thread_t first_thread = THREAD_NULL;

    /*
     * Allocate a thread and initialize static fields
     */
    if (first_thread == THREAD_NULL)
        new_thread = first_thread = current_thread();
    else
        new_thread = (thread_t)zalloc(thread_zone);
    if (new_thread == THREAD_NULL)
        return (KERN_RESOURCE_SHORTAGE);

    if (new_thread != first_thread)
        *new_thread = thread_template;

#ifdef MACH_BSD
    new_thread->uthread = uthread_alloc(parent_task, new_thread,
                                        (options & TH_OPTION_NOCRED) != 0);
    if (new_thread->uthread == NULL) {
        zfree(thread_zone, new_thread);
        return (KERN_RESOURCE_SHORTAGE);
    }
#endif  /* MACH_BSD */

    if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
        void *ut = new_thread->uthread;

        new_thread->uthread = NULL;
        /* cred free may not be necessary */
        uthread_cleanup(parent_task, ut, parent_task->bsd_info);
        uthread_cred_free(ut);
        uthread_zone_free(ut);
#endif  /* MACH_BSD */
        zfree(thread_zone, new_thread);
        return (KERN_FAILURE);
    }

    new_thread->task = parent_task;

    thread_lock_init(new_thread);
    wake_lock_init(new_thread);

    lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);

    ipc_thread_init(new_thread);
    queue_init(&new_thread->held_ulocks);

    new_thread->continuation = continuation;

    lck_mtx_lock(&tasks_threads_lock);
    task_lock(parent_task);

    if (    !parent_task->active ||
            parent_task->halting ||
            ((options & TH_OPTION_NOSUSP) != 0 &&
             parent_task->suspend_count > 0) ||
            (parent_task->thread_count >= task_threadmax &&
             parent_task != kernel_task)    ) {
        task_unlock(parent_task);
        lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
        {
            void *ut = new_thread->uthread;

            new_thread->uthread = NULL;
            uthread_cleanup(parent_task, ut, parent_task->bsd_info);
            /* cred free may not be necessary */
            uthread_cred_free(ut);
            uthread_zone_free(ut);
        }
#endif  /* MACH_BSD */
        ipc_thread_disable(new_thread);
        ipc_thread_terminate(new_thread);
        lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
        machine_thread_destroy(new_thread);
        zfree(thread_zone, new_thread);
        return (KERN_FAILURE);
    }

    /* New threads inherit any default state on the task */
    machine_thread_inherit_taskwide(new_thread, parent_task);

    task_reference_internal(parent_task);

    if (new_thread->task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
        /*
         * This task has a per-thread CPU limit; make sure this new thread
         * gets its limit set too, before it gets out of the kernel.
         */
        set_astledger(new_thread);
    }
    new_thread->t_threadledger = LEDGER_NULL;   /* per thread ledger is not inherited */

    new_thread->t_ledger = new_thread->task->ledger;
    if (new_thread->t_ledger)
        ledger_reference(new_thread->t_ledger);

    /* Cache the task's map */
    new_thread->map = parent_task->map;

    /* Chain the thread onto the task's list */
    queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
    parent_task->thread_count++;

    /* So terminating threads don't need to take the task lock to decrement */
    hw_atomic_add(&parent_task->active_thread_count, 1);

    /* Protected by the tasks_threads_lock */
    new_thread->thread_id = ++thread_unique_id;

    queue_enter(&threads, new_thread, thread_t, threads);
    threads_count++;

    timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
    timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

#if CONFIG_COUNTERS
    /*
     * If the parent task has any reservations, they need to be propagated
     * to this thread.
     */
    new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ?
        THREAD_PMC_FLAG : 0U;
#endif

    /* Set the thread's scheduling parameters */
    new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
    new_thread->sched_flags = 0;
    new_thread->max_priority = parent_task->max_priority;
    new_thread->task_priority = parent_task->priority;
    new_thread->priority = (priority < 0)? parent_task->priority: priority;
    if (new_thread->priority > new_thread->max_priority)
        new_thread->priority = new_thread->max_priority;
#if CONFIG_EMBEDDED
    if (new_thread->priority < MAXPRI_THROTTLE) {
        new_thread->priority = MAXPRI_THROTTLE;
    }
#endif /* CONFIG_EMBEDDED */
    new_thread->importance = new_thread->priority - new_thread->task_priority;
#if CONFIG_EMBEDDED
    new_thread->saved_importance = new_thread->importance;
    /* Apple iOS daemons start all threads in Darwin background */
    if (parent_task->ext_appliedstate.apptype == PROC_POLICY_IOS_APPLE_DAEMON) {
        /* Cannot use the generic routines here, so apply Darwin background directly */
        new_thread->policystate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL;
        /* set thread self backgrounding */
        new_thread->appliedstate.hw_bg = new_thread->policystate.hw_bg;
        /* priority will get recomputed suitably a bit later */
        new_thread->importance = INT_MIN;
        /* to avoid changes to many priority compute routines, set their effect here */
        new_thread->priority = MAXPRI_THROTTLE;
    }
#endif /* CONFIG_EMBEDDED */

#if defined(CONFIG_SCHED_TRADITIONAL)
    new_thread->sched_stamp = sched_tick;
    new_thread->pri_shift = sched_pri_shift;
#endif
    SCHED(compute_priority)(new_thread, FALSE);

    new_thread->active = TRUE;

    *out_thread = new_thread;

    {
        long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

        kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
            (vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);

        kdbg_trace_string(parent_task->bsd_info,
            &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
            dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
    }

    DTRACE_PROC1(lwp__create, thread_t, *out_thread);

    return (KERN_SUCCESS);
}
/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
    task_t              parent_task,
    integer_t           priority,
    thread_continue_t   continuation,
    int                 options,
#define TH_OPTION_NONE      0x00
#define TH_OPTION_NOCRED    0x01
#define TH_OPTION_NOSUSP    0x02
    thread_t            *out_thread)
{
    thread_t        new_thread;
    static thread_t first_thread;

    /*
     * Allocate a thread and initialize static fields
     */
    if (first_thread == THREAD_NULL)
        new_thread = first_thread = current_thread();
    else
        new_thread = (thread_t)zalloc(thread_zone);
    if (new_thread == THREAD_NULL)
        return (KERN_RESOURCE_SHORTAGE);

    if (new_thread != first_thread)
        *new_thread = thread_template;

#ifdef MACH_BSD
    new_thread->uthread = uthread_alloc(parent_task, new_thread,
                                        (options & TH_OPTION_NOCRED) != 0);
    if (new_thread->uthread == NULL) {
        zfree(thread_zone, new_thread);
        return (KERN_RESOURCE_SHORTAGE);
    }
#endif  /* MACH_BSD */

    if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
        void *ut = new_thread->uthread;

        new_thread->uthread = NULL;
        /* cred free may not be necessary */
        uthread_cleanup(parent_task, ut, parent_task->bsd_info);
        uthread_cred_free(ut);
        uthread_zone_free(ut);
#endif  /* MACH_BSD */
        zfree(thread_zone, new_thread);
        return (KERN_FAILURE);
    }

    new_thread->task = parent_task;

    thread_lock_init(new_thread);
    wake_lock_init(new_thread);

    lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);

    ipc_thread_init(new_thread);
    queue_init(&new_thread->held_ulocks);

    new_thread->continuation = continuation;

    lck_mtx_lock(&tasks_threads_lock);
    task_lock(parent_task);

    if (    !parent_task->active ||
            parent_task->halting ||
            ((options & TH_OPTION_NOSUSP) != 0 &&
             parent_task->suspend_count > 0) ||
            (parent_task->thread_count >= task_threadmax &&
             parent_task != kernel_task)    ) {
        task_unlock(parent_task);
        lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
        {
            void *ut = new_thread->uthread;

            new_thread->uthread = NULL;
            uthread_cleanup(parent_task, ut, parent_task->bsd_info);
            /* cred free may not be necessary */
            uthread_cred_free(ut);
            uthread_zone_free(ut);
        }
#endif  /* MACH_BSD */
        ipc_thread_disable(new_thread);
        ipc_thread_terminate(new_thread);
        lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
        machine_thread_destroy(new_thread);
        zfree(thread_zone, new_thread);
        return (KERN_FAILURE);
    }

    /* New threads inherit any default state on the task */
    machine_thread_inherit_taskwide(new_thread, parent_task);

    task_reference_internal(parent_task);

    /* Cache the task's map */
    new_thread->map = parent_task->map;

    /* Chain the thread onto the task's list */
    queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
    parent_task->thread_count++;

    /* So terminating threads don't need to take the task lock to decrement */
    hw_atomic_add(&parent_task->active_thread_count, 1);

    /* Protected by the tasks_threads_lock */
    new_thread->thread_id = ++thread_unique_id;

    queue_enter(&threads, new_thread, thread_t, threads);
    threads_count++;

    timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
    timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

#if CONFIG_COUNTERS
    /*
     * If parent task has any reservations, they need to be propagated to this
     * thread.
     */
    new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ?
        THREAD_PMC_FLAG : 0U;
#endif

    /* Set the thread's scheduling parameters */
    if (parent_task != kernel_task)
        new_thread->sched_mode |= TH_MODE_TIMESHARE;
    new_thread->max_priority = parent_task->max_priority;
    new_thread->task_priority = parent_task->priority;
    new_thread->priority = (priority < 0)? parent_task->priority: priority;
    if (new_thread->priority > new_thread->max_priority)
        new_thread->priority = new_thread->max_priority;
    new_thread->importance = new_thread->priority - new_thread->task_priority;
    new_thread->sched_stamp = sched_tick;
    new_thread->pri_shift = sched_pri_shift;
    compute_priority(new_thread, FALSE);

    new_thread->active = TRUE;

    *out_thread = new_thread;

    {
        long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

        kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

        KERNEL_DEBUG_CONSTANT(
            TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
            (vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);

        kdbg_trace_string(parent_task->bsd_info,
            &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

        KERNEL_DEBUG_CONSTANT(
            TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
            dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
    }

    DTRACE_PROC1(lwp__create, thread_t, *out_thread);

    return (KERN_SUCCESS);
}
wait_result_t
ipc_mqueue_receive_on_thread(
    ipc_mqueue_t        mqueue,
    mach_msg_option_t   option,
    mach_msg_size_t     max_size,
    mach_msg_timeout_t  rcv_timeout,
    int                 interruptible,
    thread_t            thread)
{
    ipc_kmsg_queue_t    kmsgs;
    wait_result_t       wresult;
    uint64_t            deadline;
    spl_t               s;

    s = splsched();
    imq_lock(mqueue);

    if (imq_is_set(mqueue)) {
        queue_t q;

        q = &mqueue->imq_preposts;

        /*
         * If we are waiting on a portset mqueue, we need to see if
         * any of the member ports have work for us.  Ports that
         * have (or recently had) messages will be linked in the
         * prepost queue for the portset.  By holding the portset's
         * mqueue lock during the search, we tie up any attempts by
         * mqueue_deliver or portset membership changes that may
         * cross our path.
         */
    search_set:
        while (!queue_empty(q)) {
            wait_queue_link_t   wql;
            ipc_mqueue_t        port_mq;

            queue_remove_first(q, wql, wait_queue_link_t, wql_preposts);
            assert(!wql_is_preposted(wql));

            /*
             * This is a lock order violation, so we have to do it
             * "softly," putting the link back on the prepost list
             * if it fails (at the tail is fine since the order of
             * handling messages from different sources in a set is
             * not guaranteed and we'd like to skip to the next source
             * if one is available).
             */
            port_mq = (ipc_mqueue_t)wql->wql_queue;
            if (!imq_lock_try(port_mq)) {
                queue_enter(q, wql, wait_queue_link_t, wql_preposts);
                imq_unlock(mqueue);
                splx(s);
                mutex_pause(0);
                s = splsched();
                imq_lock(mqueue);
                goto search_set;    /* start again at beginning - SMP */
            }

            /*
             * If there are no messages on this queue, just skip it
             * (we already removed the link from the set's prepost queue).
             */
            kmsgs = &port_mq->imq_messages;
            if (ipc_kmsg_queue_first(kmsgs) == IKM_NULL) {
                imq_unlock(port_mq);
                continue;
            }

            /*
             * There are messages, so reinsert the link back
             * at the tail of the preposted queue (for fairness)
             * while we still have the portset mqueue locked.
             */
            queue_enter(q, wql, wait_queue_link_t, wql_preposts);
            imq_unlock(mqueue);

            /*
             * Continue on to handling the message with just
             * the port mqueue locked.
             */
            ipc_mqueue_select_on_thread(port_mq, option, max_size, thread);
            imq_unlock(port_mq);
            splx(s);
            return THREAD_NOT_WAITING;
        }
    } else {
        /*
         * Receive on a single port.  Just try to get the messages.
         */
        kmsgs = &mqueue->imq_messages;
        if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
            ipc_mqueue_select_on_thread(mqueue, option, max_size, thread);
            imq_unlock(mqueue);
            splx(s);
            return THREAD_NOT_WAITING;
        }
    }

    /*
     * Looks like we'll have to block.  The mqueue we will
     * block on (whether the set's or the local port's) is
     * still locked.
     */
    if (option & MACH_RCV_TIMEOUT) {
        if (rcv_timeout == 0) {
            imq_unlock(mqueue);
            splx(s);
            thread->ith_state = MACH_RCV_TIMED_OUT;
            return THREAD_NOT_WAITING;
        }
    }

    thread_lock(thread);
    thread->ith_state = MACH_RCV_IN_PROGRESS;
    thread->ith_option = option;
    thread->ith_msize = max_size;

    if (option & MACH_RCV_TIMEOUT)
        clock_interval_to_deadline(rcv_timeout, 1000*NSEC_PER_USEC, &deadline);
    else
        deadline = 0;

    wresult = wait_queue_assert_wait64_locked(&mqueue->imq_wait_queue,
                                              IPC_MQUEUE_RECEIVE,
                                              interruptible,
                                              TIMEOUT_URGENCY_USER_NORMAL,
                                              deadline, 0,
                                              thread);
    /* preposts should be detected above, not here */
    if (wresult == THREAD_AWAKENED)
        panic("ipc_mqueue_receive_on_thread: sleep walking");

    thread_unlock(thread);
    imq_unlock(mqueue);
    splx(s);
    return wresult;
}
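/*
 * Illustrative sketch of a blocking receive built on top of
 * ipc_mqueue_receive_on_thread() for the current thread.  Passing
 * THREAD_CONTINUE_NULL and reading the outcome from ith_state afterwards are
 * simplifying assumptions; the real receive path uses a continuation and a
 * separate results routine.
 */
void
example_mqueue_receive(
    ipc_mqueue_t        mqueue,
    mach_msg_option_t   option,
    mach_msg_size_t     max_size,
    mach_msg_timeout_t  rcv_timeout,
    int                 interruptible)
{
    wait_result_t   wresult;
    thread_t        self = current_thread();

    wresult = ipc_mqueue_receive_on_thread(mqueue, option, max_size,
                                           rcv_timeout, interruptible, self);
    if (wresult == THREAD_NOT_WAITING)
        return;     /* a message, timeout, or error was already staged on self */

    if (wresult == THREAD_WAITING)
        wresult = thread_block(THREAD_CONTINUE_NULL);

    /* self->ith_state now describes how the wait ended */
}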
/*
 * Manage next request event
 */
void
datadev_request(
    datadev_t   ddp)
{
    kern_return_t   rc;
    io_req_t        ior;
    spl_t           s;

    s = splsched();
    mutex_lock(&datadev_lock);

    if (ddp != (datadev_t)0) {
        /*
         * Queue current request
         */
        queue_enter(&datadev_wait, ddp, datadev_t, dd_chain);
    }

    /*
     * Try to start next request
     */
    if (queue_empty(&datadev_wait) || datadev_ior == (io_req_t)0) {
        /*
         * No request or no pending read
         */
        mutex_unlock(&datadev_lock);
        splx(s);
        return;
    }

    /*
     * Extract first waiting request
     */
    ddp = (datadev_t)queue_first(&datadev_wait);

    /*
     * Extract pending I/O request
     */
    ior = datadev_ior;
    datadev_ior = (io_req_t)0;

    /*
     * Check that the read buffer is large enough
     */
    if (ior->io_count < ddp->dd_size) {
        /*
         * Return size error for this request
         */
        mutex_unlock(&datadev_lock);
        splx(s);
        ior->io_error = D_INVALID_SIZE;
    } else {
        /*
         * Move waiting request from the waiting queue to the active one.
         */
        queue_remove(&datadev_wait, ddp, datadev_t, dd_chain);
        queue_enter(&datadev_curr, ddp, datadev_t, dd_chain);
        mutex_unlock(&datadev_lock);
        splx(s);

        /*
         * Activate the request
         */
        bcopy(ddp->dd_name, ior->io_data, ddp->dd_size);
        ddp->dd_dev = ior->io_unit;
        ior->io_residual = ior->io_count - ddp->dd_size;
        ior->io_error = D_SUCCESS;
    }

    io_completed(ior, FALSE);
}
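/*
 * Hypothetical read entry point pairing with datadev_request(): it records
 * the pending I/O request and then tries to match it against the first
 * waiting device.  The single-outstanding-read restriction and the routine
 * name are assumptions for illustration; only the locking and the
 * datadev_request((datadev_t)0) kick mirror the routine above.
 */
io_return_t
example_datadev_read(
    dev_t       dev,
    io_req_t    ior)
{
    spl_t   s;

    s = splsched();
    mutex_lock(&datadev_lock);

    if (datadev_ior != (io_req_t)0) {
        /* only one outstanding read supported in this sketch */
        mutex_unlock(&datadev_lock);
        splx(s);
        return (D_ALREADY_QUEUED);
    }

    datadev_ior = ior;
    mutex_unlock(&datadev_lock);
    splx(s);

    /* no new device request to queue: just try to satisfy the pending read */
    datadev_request((datadev_t)0);

    return (D_IO_QUEUED);
}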
/*
 * task_swapout:
 *      A reference to the task must be held.
 *
 *      Start swapping out a task by sending an AST_SWAPOUT to each thread.
 *      When the threads reach a clean point, they queue themselves up on the
 *      swapout_thread_q to be swapped out by the task_swap_swapout_thread.
 *      The task can be swapped in at any point in this process.
 *
 *      A task will not be fully swapped out (i.e. its map residence count
 *      at zero) until all currently-swapped threads run and reach
 *      a clean point, at which time they will be swapped again,
 *      decrementing the swap_ast_waiting count on the task.
 *
 *      Locking: no locks held upon entry and exit.
 *               Task_lock is held throughout this function.
 */
kern_return_t
task_swapout(task_t task)
{
    thread_act_t    thr_act;
    thread_t        thread;
    queue_head_t    *list;
    int             s;

    task_swapout_lock();
    task_lock(task);

    /*
     * NOTE: look into turning these into assertions if they
     * are invariants.
     */
    if ((task->swap_state != TASK_SW_IN) || (!task->active)) {
        task_unlock(task);
        task_swapout_unlock();
        return (KERN_FAILURE);
    }

    if (task->swap_flags & TASK_SW_ELIGIBLE) {
        queue_remove(&eligible_tasks, task, task_t, swapped_tasks);
        task->swap_flags &= ~TASK_SW_ELIGIBLE;
    }
    task_swapout_unlock();

    /* set state to avoid races with task_swappable(FALSE) */
    task->swap_state = TASK_SW_GOING_OUT;
    task->swap_rss = pmap_resident_count(task->map->pmap);
    task_swaprss_out += task->swap_rss;
    task->swap_ast_waiting = task->thr_act_count;

    /*
     * halt all threads in this task:
     * We don't need the thread list lock for traversal.
     */
    list = &task->thr_acts;
    thr_act = (thread_act_t) queue_first(list);
    while (!queue_end(list, (queue_entry_t) thr_act)) {
        boolean_t       swappable;
        thread_act_t    ract;

        thread = act_lock_thread(thr_act);
        s = splsched();
        if (!thread)
            swappable = (thr_act->swap_state != TH_SW_UNSWAPPABLE);
        else {
            thread_lock(thread);
            swappable = TRUE;
            for (ract = thread->top_act; ract; ract = ract->lower)
                if (ract->swap_state == TH_SW_UNSWAPPABLE) {
                    swappable = FALSE;
                    break;
                }
        }
        if (swappable)
            thread_ast_set(thr_act, AST_SWAPOUT);
        if (thread)
            thread_unlock(thread);
        splx(s);
        assert((thr_act->ast & AST_TERMINATE) == 0);
        act_unlock_thread(thr_act);
        thr_act = (thread_act_t) queue_next(&thr_act->thr_acts);
    }

    task->swap_stamp = sched_tick;
    task->swap_nswap++;
    assert((task->swap_flags & TASK_SW_WANT_IN) == 0);

    /* put task on the queue of swapped out tasks */
    task_swapper_lock();
#if TASK_SW_DEBUG
    if (task_swap_debug && on_swapped_list(task)) {
        printf("task 0x%X already on list\n", task);
        Debugger("");
    }
#endif  /* TASK_SW_DEBUG */
    queue_enter(&swapped_tasks, task, task_t, swapped_tasks);
    tasks_swapped_out++;
    task_swapouts++;
    task_swapper_unlock();
    task_unlock(task);

    return (KERN_SUCCESS);
}
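/*
 * Hypothetical counterpart sketch: when a task is brought back in, the
 * bookkeeping task_swapout() performed has to be undone.  This is a
 * simplified assumption of that path, not the full task_swapin(); only the
 * queue and state fields it touches are taken from the routine above.
 */
void
example_task_swapin_bookkeeping(
    task_t  task)
{
    task_swapper_lock();
    queue_remove(&swapped_tasks, task, task_t, swapped_tasks);
    tasks_swapped_out--;
    task_swapper_unlock();

    task_lock(task);
    task->swap_state = TASK_SW_IN;
    task->swap_stamp = sched_tick;
    task_unlock(task);
}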