/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 *
 *	Returns:
 *		THREAD_AWAKENED if the wait was satisfied immediately
 *		(pre-posted wait queue set); otherwise the result of
 *		thread_mark_wait_locked() (THREAD_WAITING on success).
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	thread_t thread)
{
	wait_result_t wait_result;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	/*
	 * A pre-posted wait queue set satisfies the wait without
	 * blocking: report the thread as already awakened and do not
	 * enqueue it.
	 */
	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
			return(THREAD_AWAKENED);
	}

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (thread->vm_privilege)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

		/*
		 * Publish what the thread is waiting on (done after the
		 * enqueue, with both locks still held) so wakeups can
		 * match the event against this thread.
		 */
		thread->wait_event = event;
		thread->wait_queue = wq;
	}
	return(wait_result);
}
/*
 *	Handle pending asynchronous system traps (ASTs) for the current
 *	thread on the current CPU.
 *
 *	reasons:	the AST reasons the caller is prepared to handle;
 *			only the bits also pending in need_ast[mycpu] are acted on.
 *	enable:		interrupt-enable state to restore on return (and
 *			temporarily while running non-preemption ASTs).
 *
 *	Called at splsched (interrupts disabled).
 */
void
ast_taken(
	ast_t		reasons,
	boolean_t	enable
)
{
	register thread_t	self = current_thread();
	register int		mycpu = cpu_number();
	boolean_t		preempt_trap = (reasons == AST_PREEMPTION);

	/* Consume only the pending ASTs we were asked to handle. */
	reasons &= need_ast[mycpu];
	need_ast[mycpu] &= ~reasons;

	/*
	 * Handle ASTs for all threads
	 * except idle processor threads.
	 */
	if (!(self->state & TH_IDLE)) {
		/*
		 * Check for urgent preemption.
		 */
		if (	(reasons & AST_URGENT)				&&
				wait_queue_assert_possible(self)		) {
			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason(THREAD_CONTINUE_NULL,
										AST_PREEMPT | AST_URGENT);
			}

			reasons &= ~AST_PREEMPTION;
		}

		/*
		 * The kernel preempt traps
		 * skip all other ASTs.
		 */
		if (!preempt_trap) {
			/* Non-preemption ASTs may run with interrupts enabled. */
			ml_set_interrupts_enabled(enable);

#ifdef	MACH_BSD
			/*
			 * Handle BSD hook.
			 */
			if (reasons & AST_BSD) {
				extern void	bsd_ast(thread_act_t	act);
				thread_act_t	act = self->top_act;

				thread_ast_clear(act, AST_BSD);
				bsd_ast(act);
			}
#endif

			/*
			 * Thread APC hook.
			 */
			if (reasons & AST_APC)
				act_execute_returnhandlers();

			/* Re-disable interrupts before the preemption check. */
			ml_set_interrupts_enabled(FALSE);

			/*
			 * Check for preemption.  Re-evaluate, since the AST
			 * handlers above may have changed scheduling state.
			 */
			if (reasons & AST_PREEMPT) {
				processor_t		myprocessor = current_processor();

				if (csw_needed(self, myprocessor))
					reasons = AST_PREEMPT;
				else
					reasons = AST_NONE;
			}

			if (	(reasons & AST_PREEMPT)				&&
					wait_queue_assert_possible(self)		) {
				counter(c_ast_taken_block++);
				/* Blocks and resumes in user mode via the exception return path. */
				thread_block_reason(thread_exception_return, AST_PREEMPT);
			}
		}
	}

	/* Restore the caller-requested interrupt-enable state. */
	ml_set_interrupts_enabled(enable);
}
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 *
 *	deadline:	absolute-time deadline for the wait; 0 means wait
 *			without a timeout.  When nonzero, the thread's wait
 *			timer is armed before returning.
 *
 *	Returns:
 *		THREAD_AWAKENED if satisfied immediately by a pre-posted
 *		set; otherwise the result of thread_mark_wait_locked()
 *		(THREAD_WAITING on success).
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline,
	thread_t thread)
{
	wait_result_t wait_result;
	boolean_t realtime;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	/*
	 * Only a NO_EVENT64 wait can be satisfied by a pre-posted wait
	 * queue set; in that case report the thread as already awakened.
	 */
	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (event == NO_EVENT64 && wqs_is_preposted(wqs))
			return(THREAD_AWAKENED);
	}

	/*
	 * Realtime threads get priority for wait queue placements.
	 * This allows wait_queue_wakeup_one to prefer a waiting
	 * realtime thread, similar in principle to performing
	 * a wait_queue_wakeup_all and allowing scheduler prioritization
	 * to run the realtime thread, but without causing the
	 * lock contention of that scenario.
	 */
	realtime = (thread->sched_pri >= BASEPRI_REALTIME);

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		/*
		 * LIFO placement for non-FIFO queues, vm-privileged
		 * threads, and realtime threads; FIFO otherwise.
		 */
		if (!wq->wq_fifo
			|| (thread->options & TH_OPT_VMPRIV)
			|| realtime)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

		/* Publish what the thread is waiting on for wakeup matching. */
		thread->wait_event = event;
		thread->wait_queue = wq;

		if (deadline != 0) {
			uint32_t flags;

			/* Realtime waiters get a critical timer callout. */
			flags = realtime ? TIMER_CALL_CRITICAL : 0;

			/*
			 * timer_call_enter returns TRUE if the timer was
			 * already pending; only take a new activation
			 * reference when it was not.
			 */
			if (!timer_call_enter(&thread->wait_timer, deadline, flags))
				thread->wait_timer_active++;
			thread->wait_timer_is_set = TRUE;
		}
	}
	return(wait_result);
}