Example #1
void
lck_grp_init(
	lck_grp_t		*grp,
	const char*		grp_name,
	lck_grp_attr_t	*attr)
{
	bzero((void *)grp, sizeof(lck_grp_t));

	(void) strncpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);

	if (attr != LCK_GRP_ATTR_NULL)
		grp->lck_grp_attr = attr->grp_attr_val;
	else if (LcksOpts & enaLkStat)
		grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		grp->lck_grp_attr = LCK_ATTR_NONE;

	grp->lck_grp_refcnt = 1;

	mutex_lock(&lck_grp_lock);
	enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
	lck_grp_cnt++;
	mutex_unlock(&lck_grp_lock);

}
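A typical caller pairs lck_grp_init() with the attribute setup shown later in Example #5; the sketch below illustrates that pattern (the my_* names are illustrative, only the lck_* calls come from these examples).

static lck_grp_t	my_lck_grp;
static lck_grp_attr_t	my_lck_grp_attr;

static void
my_locks_init(void)
{
	/* default attributes, then register the group (cf. Example #5) */
	lck_grp_attr_setdefault(&my_lck_grp_attr);
	lck_grp_init(&my_lck_grp, "my_subsystem", &my_lck_grp_attr);
}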
Example #2
void thread_swapin(thread_t thread)
{
	switch (thread->state & TH_SWAP_STATE) {
	    case TH_SWAPPED:
		/*
		 *	Swapped out - queue for swapin thread.
		 */
		thread->state = (thread->state & ~TH_SWAP_STATE)
				| TH_SW_COMING_IN;
		swapper_lock();
		enqueue_tail(&swapin_queue, &(thread->links));
		swapper_unlock();
		thread_wakeup((event_t) &swapin_queue);
		break;

	    case TH_SW_COMING_IN:
		/*
		 *	Already queued for swapin thread, or being
		 *	swapped in.
		 */
		break;

	    default:
		/*
		 *	Already swapped in.
		 */
		panic("thread_swapin");
	}
}
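The consumer side of this hand-off is not shown above; the following sketch illustrates the complementary drain loop, assuming a hypothetical helper name and that thread->links is the first member of struct thread (which is what the enqueue/dequeue casts rely on).

static void
swapin_queue_drain(void)
{
	thread_t	thread;

	swapper_lock();
	while ((thread = (thread_t)dequeue_head(&swapin_queue)) != THREAD_NULL) {
		swapper_unlock();
		/* ... perform the actual swap-in, clearing TH_SW_COMING_IN ... */
		swapper_lock();
	}
	swapper_unlock();
	/*
	 * The real consumer would then sleep on (event_t)&swapin_queue
	 * until thread_swapin() posts another thread_wakeup().
	 */
}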
Example #3
/*
 *	processor_up:
 *
 *	Flag processor as up and running, and available
 *	for scheduling.
 */
void
processor_up(
	processor_t			processor)
{
	processor_set_t		pset;
	spl_t				s;

	s = splsched();
	init_ast_check(processor);
	pset = processor->processor_set;
	pset_lock(pset);
	++pset->online_processor_count;
	enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
	processor->state = PROCESSOR_RUNNING;
	(void)hw_atomic_add(&processor_avail_count, 1);
	commpage_update_active_cpus();
	pset_unlock(pset);
	ml_cpu_up();
	splx(s);

#if CONFIG_DTRACE
	if (dtrace_cpu_state_changed_hook)
		(*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE);
#endif
}
Example #4
/*
 * [internal]
 * Queue IOR on reply queue, to wait for TTY operation.
 * TTY must be locked (at spltty).
 */
void queue_delayed_reply(
	queue_t		qh,
	io_req_t	ior,
	boolean_t	(*io_done)(io_req_t) )
{
	ior->io_done = io_done;
	enqueue_tail(qh, (queue_entry_t)ior);
}
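Once the TTY operation completes, the queued requests have to be drained and their saved callbacks invoked; below is a minimal sketch of that completion pass (the helper name is hypothetical, and the TTY is assumed to still be locked as required above).

static void
drain_delayed_replies(
	queue_t		qh)
{
	io_req_t	ior;

	while ((ior = (io_req_t)dequeue_head(qh)) != (io_req_t)0) {
		/* invoke the completion routine saved by queue_delayed_reply() */
		if (ior->io_done)
			(void)(*ior->io_done)(ior);
	}
}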
Example #5
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_group0;
	kern_return_t			result;
	thread_t				thread;
	int						i;
	spl_t					s;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif
	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	s = splsched();
	thread_call_lock_spin();

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
	wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

	queue_init(&thread_call_internal_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[internal_call_count];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	thread_call_unlock();
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
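For context, a minimal sketch of how a client would use the module once initialized, assuming the standard thread_call_allocate()/thread_call_enter() client API (the my_* names are illustrative).

static void
my_deferred_work(thread_call_param_t param0, thread_call_param_t param1)
{
	/* runs later on a thread_call thread */
}

static void
my_schedule_work(void *context)
{
	thread_call_t	call;

	call = thread_call_allocate(my_deferred_work, (thread_call_param_t)context);
	thread_call_enter(call);
}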
Example #6
static InterruptMemberNumber
hard_irq( InterruptSetMember ISTmember, void *ref_con, UInt32 the_int_count )
{
	channel_t *ch = channels;	/* fixme */
	int running, compl_cnt, event;
	request_t *r;
	
	/* Note: Q & A DV 34 explicitly forbids the usage of secondary
	 * interrupts on the page-fault path (it leads to deadlocks).
	 */
	/* Note: The OSI call _always_ modifies the return arguments */
	if( !OSI_ABlkIRQAck( ch->channel, &compl_cnt, &running, &event ) )
		return kIsrIsNotComplete;

	ch->running = running;

	if( event )
		handle_events( ch );

	/* handle overflow buffer */
	if( ch->obuf_cnt && compl_cnt - ch->obuf_completion >= 0 ) {
		if( ch->obuf_dest ) {
			char *dest = ch->obuf_dest, *src = ch->obuf;
			int cnt = ch->obuf_cnt;
			while( cnt-- )
				*dest++ = *src++;
		}
		/* XXX: insert optimization barrier here */
		*(volatile int*)&ch->obuf_cnt = 0;
	}

	/* process completions */
	while( (r=(request_t*)dequeue(&ch->compl_queue)) ) {
		IOCommandID cmdID;

		if( r->req_num - compl_cnt > 0 ) {
			enqueue_tail( &ch->compl_queue, (queue_el_t*)r );
			break;
		}
		/* free resources... */
		if( r->mem_prepared )
			CheckpointIO( r->ioprep.preparationID, 0 );

		if( r->ablk_req & (ABLK_READ_REQ | ABLK_WRITE_REQ) )
			((IOParam*)r->pb)->ioActCount = r->xfer_cnt;
		
		cmdID = r->cmdID;
		fifo_put( &ch->free_fifo, (fifo_el_t*)r );

		/* ...and complete */
		IOCommandIsComplete( cmdID, noErr );
	}
	process_request_queue( ch );

	return kIsrIsComplete;
}
Example #7
/*
 *	thread_stack_enqueue:
 *
 *	Enqueue a thread for stack allocation.
 *
 *	Called at splsched.
 */
void
thread_stack_enqueue(
	thread_t		thread)
{
	simple_lock(&thread_stack_lock);
	enqueue_tail(&thread_stack_queue, (queue_entry_t)thread);
	simple_unlock(&thread_stack_lock);

	thread_wakeup((event_t)&thread_stack_queue);
}
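The matching consumer drains thread_stack_queue after the wakeup; the sketch below is illustrative (stack_alloc() is assumed to be the routine that attaches a kernel stack, and interrupt-level handling is elided), while the queue, lock and scheduler calls mirror the examples in this section.

static void
thread_stack_daemon_body(void)
{
	thread_t	thread;

	simple_lock(&thread_stack_lock);
	while ((thread = (thread_t)(uintptr_t)dequeue_head(&thread_stack_queue)) != THREAD_NULL) {
		simple_unlock(&thread_stack_lock);

		stack_alloc(thread);		/* give the thread a kernel stack */

		thread_lock(thread);
		thread_setrun(thread, SCHED_TAILQ);
		thread_unlock(thread);

		simple_lock(&thread_stack_lock);
	}
	simple_unlock(&thread_stack_lock);
	/* then sleep on (event_t)&thread_stack_queue until the next wakeup */
}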
Example #8
/*
 *	sched_traditional_processor_queue_shutdown:
 *
 *	Shutdown a processor run queue by
 *	re-dispatching non-bound threads.
 *
 *	Associated pset must be locked, and is
 *	returned unlocked.
 */
static void
sched_traditional_processor_queue_shutdown(processor_t processor)
{
	processor_set_t         pset    = processor->processor_set;
	run_queue_t             rq      = runq_for_processor(processor);
	queue_t                 queue   = rq->queues + rq->highq;
	int                     pri     = rq->highq;
	int                     count   = rq->count;
	thread_t                next, thread;
	queue_head_t            tqueue;

	queue_init(&tqueue);

	while (count > 0) {
		thread = (thread_t)(uintptr_t)queue_first(queue);
		while (!queue_end(queue, (queue_entry_t)thread)) {
			next = (thread_t)(uintptr_t)queue_next((queue_entry_t)thread);

			if (thread->bound_processor == PROCESSOR_NULL) {
				remqueue((queue_entry_t)thread);

				thread->runq = PROCESSOR_NULL;
				SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count);
				runq_consider_decr_bound_count(processor, thread);
				rq->count--;
				if (SCHED(priority_is_urgent)(pri)) {
					rq->urgency--; assert(rq->urgency >= 0);
				}
				if (queue_empty(queue)) {
					bitmap_clear(rq->bitmap, pri);
					rq->highq = bitmap_first(rq->bitmap, NRQS);
				}

				enqueue_tail(&tqueue, (queue_entry_t)thread);
			}
			count--;

			thread = next;
		}

		queue--; pri--;
	}

	pset_unlock(pset);

	while ((thread = (thread_t)(uintptr_t)dequeue_head(&tqueue)) != THREAD_NULL) {
		thread_lock(thread);

		thread_setrun(thread, SCHED_TAILQ);

		thread_unlock(thread);
	}
}
Example #9
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t			call;
	kern_return_t			result;
	thread_t			thread;
	int				i;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif

	nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs);
	wait_queue_init(&daemon_wqueue, SYNC_POLICY_FIFO);

	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_LOW], THREAD_CALL_PRIORITY_LOW, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_USER], THREAD_CALL_PRIORITY_USER, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 1, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE);

	disable_ints_and_lock();

	queue_init(&thread_call_internal_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[INTERNAL_CALL_COUNT];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	enable_ints_and_unlock();

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, NULL, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
Example #10
static void
iopa_free(uintptr_t addr, vm_size_t bytes)
{
    io_pagealloc_t * pa;
    uint32_t         count;
    uintptr_t        chunk;

    if (!bytes) bytes = 1;

    chunk = (addr & page_mask);
    assert(0 == (chunk & (kIOPageAllocChunkBytes - 1)));

    pa = (typeof(pa)) (addr | (page_size - kIOPageAllocChunkBytes));
    assert(kIOPageAllocSignature == pa->signature);

    count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
    chunk /= kIOPageAllocChunkBytes;

    IOSimpleLockLock(gIOPageAllocLock);
    if (!pa->avail)
    {
	assert(!pa->link.next);
	enqueue_tail(&gIOPageAllocList, &pa->link);
    }
    pa->avail |= ((-1ULL << (64 - count)) >> chunk);
    if (pa->avail != -2ULL) pa = 0;
    else
    {
        remque(&pa->link);
        pa->link.next = 0;
        pa->signature = 0;
	gIOPageAllocCount--;
    }
    gIOPageAllocBytes -= bytes;
    IOSimpleLockUnlock(gIOPageAllocLock);
    if (pa) iopa_freepage(pa);

#if IOALLOCDEBUG
    debug_iomalloc_size -= bytes;
#endif
    IOStatisticsAlloc(kIOStatisticsFreeAligned, bytes);
}
Example #11
kern_return_t
host_request_notification(
	host_t					host,
	host_flavor_t			notify_type,
	ipc_port_t				port)
{
	host_notify_t		entry;

	if (host == HOST_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (!IP_VALID(port))
		return (KERN_INVALID_CAPABILITY);

	if (notify_type > HOST_NOTIFY_TYPE_MAX || notify_type < 0)
		return (KERN_INVALID_ARGUMENT);

	entry = (host_notify_t)zalloc(host_notify_zone);
	if (entry == NULL)
		return (KERN_RESOURCE_SHORTAGE);

	mutex_lock(&host_notify_lock);

	ip_lock(port);
	if (!ip_active(port) || ip_kotype(port) != IKOT_NONE) {
		ip_unlock(port);

		mutex_unlock(&host_notify_lock);
		zfree(host_notify_zone, (vm_offset_t)entry);

		return (KERN_FAILURE);
	}

	entry->port = port;
	ipc_kobject_set_atomically(port, (ipc_kobject_t)entry, IKOT_HOST_NOTIFY);
	ip_unlock(port);

	enqueue_tail(&host_notify_queue[notify_type], (queue_entry_t)entry);
	mutex_unlock(&host_notify_lock);

	return (KERN_SUCCESS);
}
Example #12
void
lck_mod_init(
	void)
{
	/*
	 * Obtain "lcks" options: this currently controls lock statistics
	 */
	if (!PE_parse_boot_argn("lcks", &LcksOpts, sizeof (LcksOpts)))
		LcksOpts = 0;


#if (DEVELOPMENT || DEBUG) && defined(__x86_64__)
	if (!PE_parse_boot_argn("-disable_mtx_chk", &LckDisablePreemptCheck, sizeof (LckDisablePreemptCheck)))
		LckDisablePreemptCheck = 0;
#endif /* (DEVELOPMENT || DEBUG) && defined(__x86_64__) */

	queue_init(&lck_grp_queue);
	
	/* 
	 * Need to bootstrap the LockCompatGroup instead of calling lck_grp_init() here. This avoids
	 * grabbing the lck_grp_lock before it is initialized.
	 */
	
	bzero(&LockCompatGroup, sizeof(lck_grp_t));
	(void) strncpy(LockCompatGroup.lck_grp_name, "Compatibility APIs", LCK_GRP_MAX_NAME);
	
	if (LcksOpts & enaLkStat)
		LockCompatGroup.lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		LockCompatGroup.lck_grp_attr = LCK_ATTR_NONE;
	
	LockCompatGroup.lck_grp_refcnt = 1;
	
	enqueue_tail(&lck_grp_queue, (queue_entry_t)&LockCompatGroup);
	lck_grp_cnt = 1;
	
	lck_grp_attr_setdefault(&LockDefaultGroupAttr);
	lck_attr_setdefault(&LockDefaultLckAttr);
	
	lck_mtx_init_ext(&lck_grp_lock, &lck_grp_lock_ext, &LockCompatGroup, &LockDefaultLckAttr);
}
Example #13
/*
 *	Locate and steal a thread, beginning
 *	at the pset.
 *
 *	The pset must be locked, and is returned
 *	unlocked.
 *
 *	Returns the stolen thread, or THREAD_NULL on
 *	failure.
 */
static thread_t
sched_traditional_steal_thread(processor_set_t pset)
{
	processor_set_t nset, cset = pset;
	processor_t     processor;
	thread_t        thread;

	do {
		processor = (processor_t)(uintptr_t)queue_first(&cset->active_queue);
		while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
			if (runq_for_processor(processor)->count > 0) {
				thread = sched_traditional_steal_processor_thread(processor);
				if (thread != THREAD_NULL) {
					remqueue((queue_entry_t)processor);
					enqueue_tail(&cset->active_queue, (queue_entry_t)processor);

					pset_unlock(cset);

					return (thread);
				}
			}

			processor = (processor_t)(uintptr_t)queue_next((queue_entry_t)processor);
		}

		nset = next_pset(cset);

		if (nset != pset) {
			pset_unlock(cset);

			cset = nset;
			pset_lock(cset);
		}
	} while (nset != pset);

	pset_unlock(cset);

	return (THREAD_NULL);
}
Example #14
void
lck_grp_init(lck_grp_t * grp, const char * grp_name, lck_grp_attr_t * attr)
{
	/* make sure locking infrastructure has been initialized */
	assert(lck_grp_cnt > 0);

	bzero((void *)grp, sizeof(lck_grp_t));

	(void)strlcpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);

	if (attr != LCK_GRP_ATTR_NULL)
		grp->lck_grp_attr = attr->grp_attr_val;
	else if (LcksOpts & enaLkStat)
		grp->lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		grp->lck_grp_attr = LCK_ATTR_NONE;

	grp->lck_grp_refcnt = 1;

	lck_mtx_lock(&lck_grp_lock);
	enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
	lck_grp_cnt++;
	lck_mtx_unlock(&lck_grp_lock);
}
Example #15
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 *
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	thread_t thread)
{
	wait_result_t wait_result;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (wqs->wqs_isprepost && wqs->wqs_refcount > 0)
			return(THREAD_AWAKENED);
	}
	  
	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (thread->vm_privilege)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
		thread->wait_event = event;
		thread->wait_queue = wq;
	}
	return(wait_result);
}
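A caller sketch for the locked assert-wait path, assuming wait_queue_lock()/wait_queue_unlock() and the usual current_thread()/thread_block() primitives (the function name is illustrative).

static wait_result_t
example_wait_for_event(wait_queue_t wq, event64_t event)
{
	thread_t	self = current_thread();
	wait_result_t	wres;
	spl_t		s;

	s = splsched();
	wait_queue_lock(wq);
	thread_lock(self);

	/* queue ourselves; this version takes no deadline argument */
	wres = wait_queue_assert_wait64_locked(wq, event, THREAD_UNINT, self);

	thread_unlock(self);
	wait_queue_unlock(wq);
	splx(s);

	if (wres == THREAD_WAITING)
		wres = thread_block(THREAD_CONTINUE_NULL);
	return (wres);
}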
Example #16
OSMallocTag
OSMalloc_Tagalloc(
	const char			*str,
	uint32_t			flags)
{
	OSMallocTag       OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	OSMalloc_tag_spin_lock();
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	OSMalloc_tag_unlock();
	OSMTag->OSMT_state = OSMT_VALID;
	return(OSMTag);
}
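A usage sketch for the tag, assuming the usual libkern counterparts OSMalloc(), OSFree() and OSMalloc_Tagfree() (the tag string is illustrative).

static void
osmalloc_tag_example(void)
{
	OSMallocTag	tag;
	void		*buf;

	tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);

	buf = OSMalloc(1024, tag);
	if (buf != NULL) {
		/* ... use buf ... */
		OSFree(buf, 1024, tag);
	}

	OSMalloc_Tagfree(tag);
}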
Example #17
/* returns a non-zero status if the request ring is saturated */
static inline int
process_request( channel_t *ch, request_t *req )
{
	ablk_req_head_t *prev, *d;
	ablk_sg_t *dd;
	int next, count, ret=0;
	
	for( ;; ) {
		next = ((ch->ri+1) & RING_MASK);
		prev = &ch->ring[ch->ri];
		d = &ch->ring[next];

		/* hardware saturated? */
		if( d->flags ) {
			enqueue_tail( &ch->req_queue, (queue_el_t*)req );
			ret = 1;
			break;
		}
		ch->ri = next;
		
		/* set the completion request number */
		req->req_num = ++(ch->req_count);
		d->proceed = 0;

		/* start new command */
		if( req->xfer_cnt < 0 ) {
			req->xfer_cnt = 0;
			d->flags = req->ablk_req;
			d->param = req->param;	/* block */
			d->unit = req->unit;
			if( !req->total_cnt ) {
				d->flags |= ABLK_RAISE_IRQ;
				enqueue( &ch->compl_queue, (queue_el_t*)req );
				START_REQUEST(prev);
				break;
			}
			START_REQUEST(prev);
			continue;
		}
		
		/* handle scatter and gather buffers */
		dd = (ablk_sg_t*)d;

		if( !(dd->buf=get_physical_segment(req, &count)) ) {
			/* use the overflow buffer */
			if( ch->obuf_cnt ) {
				/* overflow buf already in use... back off */
				enqueue_tail( &ch->req_queue, (queue_el_t*)req );
				ch->req_count--;
				ch->ri = ((ch->ri-1) & RING_MASK);
				ret = 1;
				break;
			}
			if( (count=req->total_cnt-req->xfer_cnt) > OBUF_SIZE )
				count = OBUF_SIZE;
			dd->buf = ch->obuf_phys;
			ch->obuf_cnt = count;
			ch->obuf_completion = req->req_num;
			if( req->ablk_req == ABLK_READ_REQ ) {
				ch->obuf_dest = req->pb->ioParam.ioBuffer + req->xfer_cnt;
			} else if( req->ablk_req == ABLK_WRITE_REQ ) {
				char *src = req->pb->ioParam.ioBuffer + req->xfer_cnt;
				char *dest = ch->obuf;
				int cnt = count;

				ch->obuf_dest = NULL;
				while( cnt-- )
					*dest++ = *src++;
			}
		}
		req->xfer_cnt += count;
		dd->count = count;
		
		/* request complete? */
		if( req->xfer_cnt == req->total_cnt ) {
			dd->flags = ABLK_SG_BUF | ABLK_RAISE_IRQ;
			enqueue( &ch->compl_queue, (queue_el_t*)req );
			START_REQUEST(prev);
			break;
		}
		dd->flags = ABLK_SG_BUF;
		START_REQUEST(prev);
	}

	if( !ch->running ) {
		ch->running = 1;
		OSI_ABlkKick( ch->channel );
	}
	return ret;
}
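Example #6 calls process_request_queue(); below is a plausible sketch of that helper given the return convention above (illustrative, not the driver's actual code): requests are fed to process_request() until the ring reports saturation, at which point the unprocessed request has already been put back on req_queue.

static void
process_request_queue( channel_t *ch )
{
	request_t *r;

	while( (r=(request_t*)dequeue(&ch->req_queue)) ) {
		if( process_request(ch, r) )
			break;		/* ring saturated; r was re-queued */
	}
}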
Example #18
/*
 *	Routine:	wait_queue_assert_wait64_locked
 *	Purpose:
 *		Insert the current thread into the supplied wait queue
 *		waiting for a particular event to be posted to that queue.
 *
 *	Conditions:
 *		The wait queue is assumed locked.
 *		The waiting thread is assumed locked.
 *
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	uint64_t deadline,
	thread_t thread)
{
	wait_result_t wait_result;
	boolean_t realtime;

	if (!wait_queue_assert_possible(thread))
		panic("wait_queue_assert_wait64_locked");

	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;

		if (event == NO_EVENT64 && wqs_is_preposted(wqs))
			return(THREAD_AWAKENED);
	}

	/*
	 * Realtime threads get priority for wait queue placements.
	 * This allows wait_queue_wakeup_one to prefer a waiting
	 * realtime thread, similar in principle to performing
	 * a wait_queue_wakeup_all and allowing scheduler prioritization
	 * to run the realtime thread, but without causing the
	 * lock contention of that scenario.
	 */
	realtime = (thread->sched_pri >= BASEPRI_REALTIME);

	/*
	 * This is the extent to which we currently take scheduling attributes
	 * into account.  If the thread is vm privileged, we stick it at
	 * the front of the queue.  Later, these queues will honor the policy
	 * value set at wait_queue_init time.
	 */
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		if (!wq->wq_fifo
			|| (thread->options & TH_OPT_VMPRIV)
			|| realtime)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);

		thread->wait_event = event;
		thread->wait_queue = wq;

		if (deadline != 0) {
			uint32_t flags;

			flags = realtime ? TIMER_CALL_CRITICAL : 0;

			if (!timer_call_enter(&thread->wait_timer, deadline, flags))
				thread->wait_timer_active++;
			thread->wait_timer_is_set = TRUE;
		}
	}
	return(wait_result);
}