Example No. 1
/*
 *	Routine: convert_port_to_locked_task
 *	Purpose:
 *		Internal helper routine to convert from a port to a locked
 *		task.  Used by several routines that try to convert from a
 *		task port to a reference on some task related object.
 *	Conditions:
 *		Nothing locked, blocking OK.
 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
        int try_failed_count = 0;

	while (IP_VALID(port)) {
		task_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			ip_unlock(port);
			return(task);
		}
		try_failed_count++;

		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_NULL;
}
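
The out-of-order locking idiom above (take the lock you already need, *_lock_try() the one that violates the normal ordering, and back off with mutex_pause() on failure) recurs throughout the examples below. The following is a minimal user-space sketch of the same idea using pthreads rather than the kernel primitives; the struct and function names are illustrative only, not XNU API.

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

struct object {
	pthread_mutex_t	outer;		/* plays the role of the port lock */
	pthread_mutex_t	inner;		/* plays the role of the task lock */
};

/*
 * Normal ordering is inner before outer.  Here we must take outer
 * first, so we only try-lock inner and back off on contention, much
 * as convert_port_to_locked_task() does with task_lock_try().
 */
static bool
lock_out_of_order(struct object *obj)
{
	unsigned try_failed_count = 0;

	for (;;) {
		pthread_mutex_lock(&obj->outer);
		if (pthread_mutex_trylock(&obj->inner) == 0)
			return true;		/* both locks held */

		pthread_mutex_unlock(&obj->outer);
		if (++try_failed_count > 1000)
			return false;		/* give up rather than spin forever */
		/* crude stand-in for mutex_pause(try_failed_count) */
		usleep(1u << (try_failed_count < 10 ? try_failed_count : 10));
	}
}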
Example No. 2
/*
 *	Routine:	semaphore_dereference
 *
 *	Release a reference on a semaphore.  If this is the last reference,
 *	the semaphore data structure is deallocated.
 */
void
semaphore_dereference(
	semaphore_t		semaphore)
{
	uint32_t collisions;
	spl_t spl_level;

	if (semaphore == NULL)
		return;

	if (hw_atomic_sub(&semaphore->ref_count, 1) != 0)
		return;

	/*
	 * Last ref, clean up the port [if any]
	 * associated with the semaphore, destroy
	 * it (if still active) and then free
	 * the semaphore.
	 */
	ipc_port_t port = semaphore->port;

	if (IP_VALID(port)) {
		assert(!port->ip_srights);
		ipc_port_dealloc_kernel(port);
	}

	/*
	 * Lock the semaphore to lock in the owner task reference.
	 * Then continue to try to lock the task (inverse order).
	 */
	spl_level = splsched();
	semaphore_lock(semaphore);
	for (collisions = 0; semaphore->active; collisions++) {
		task_t task = semaphore->owner;

		assert(task != TASK_NULL);
		
		if (task_lock_try(task)) {
			semaphore_destroy_internal(task, semaphore);
			/* semaphore unlocked */
			splx(spl_level);
			task_unlock(task);
			goto out;
		}
		
		/* failed to get out-of-order locks */
		semaphore_unlock(semaphore);
		splx(spl_level);
		mutex_pause(collisions);
		spl_level = splsched();
		semaphore_lock(semaphore);
	}
	semaphore_unlock(semaphore);
	splx(spl_level);

 out:
	zfree(semaphore_zone, semaphore);
}
Example No. 3
/*
 * Synchronize with migration given a pointer to a shuttle (instead of an
 * activation).  Called with nothing locked; returns with all
 * "appropriate" thread-related locks held (see act_lock_thread()).
 */
thread_act_t
thread_lock_act(
	thread_t	thread)
{
	thread_act_t	thr_act;

	while (1) {
		thr_act = thread->top_act;
		if (!thr_act)
			break;
		if (!act_lock_try(thr_act)) {
			mutex_pause();
			continue;
		}
		break;
	}
	return (thr_act);
}
Example No. 4
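/*
 *	Routine:	ref_pset_port_locked
 *	Purpose:
 *		Take a reference on the processor set named by a port
 *		(or by a pset-name port, when matchn is TRUE).
 *	Conditions:
 *		The port is locked on entry and unlocked on return.
 *		Returns FALSE, after a mutex_pause(), if the pset lock
 *		could not be taken out of order; the caller should retry.
 */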
boolean_t
ref_pset_port_locked(ipc_port_t port, boolean_t matchn, processor_set_t *ppset)
{
	processor_set_t pset;

	pset = PROCESSOR_SET_NULL;
	if (ip_active(port) &&
		((ip_kotype(port) == IKOT_PSET) ||
		 (matchn && (ip_kotype(port) == IKOT_PSET_NAME)))) {
		pset = (processor_set_t) port->ip_kobject;
		if (!pset_lock_try(pset)) {
			ip_unlock(port);
			mutex_pause();
			return (FALSE);
		}
		pset->ref_count++;
		pset_unlock(pset);
	}
	*ppset = pset;
	ip_unlock(port);
	return (TRUE);
}
Example No. 5
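/*
 *	Routine:	mutex_yield
 *	Purpose:
 *		Yield a held mutex to its waiters.  If other threads are
 *		waiting on the mutex, drop it, pause briefly and reacquire
 *		it; otherwise keep the lock and just count the event.
 */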
void
mutex_yield(
	    mutex_t	*mutex)
{
        lck_mtx_t	*lck;

#if DEBUG
	_mutex_assert(mutex, MA_OWNED);
#endif /* DEBUG */

	lck = (lck_mtx_t *) mutex;
	if (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT)
	        lck = &lck->lck_mtx_ptr->lck_mtx;

	if (! lck->lck_mtx_waiters) {
	        mutex_yield_no_wait++;
	} else {
	        mutex_yield_wait++;
		mutex_unlock(mutex);
		mutex_pause(0);
		mutex_lock(mutex);
	}
}
Example No. 6
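/*
 *	Routine:	lck_mtx_yield
 *	Purpose:
 *		lck_mtx_t variant of mutex_yield(): if the (possibly
 *		indirect) mutex has waiters, unlock it, pause and relock
 *		so a waiter can make progress.
 */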
void
lck_mtx_yield(
	    lck_mtx_t	*lck)
{
	int	waiters;
	
#if DEBUG
	lck_mtx_assert(lck, LCK_MTX_ASSERT_OWNED);
#endif /* DEBUG */
	
	if (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT)
	        waiters = lck->lck_mtx_ptr->lck_mtx.lck_mtx_waiters;
	else
	        waiters = lck->lck_mtx_waiters;

	if ( !waiters) {
	        mutex_yield_no_wait++;
	} else {
	        mutex_yield_wait++;
		lck_mtx_unlock(lck);
		mutex_pause(0);
		lck_mtx_lock(lck);
	}
}
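
mutex_yield() and lck_mtx_yield() only give up the lock when someone is actually waiting for it. Below is a rough user-space sketch of that yield-if-contended idea; pthreads exposes no waiter count, so this hypothetical helper takes the contention hint from its caller.

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

/*
 * If the caller believes other threads are queued on the mutex, drop
 * it, yield the processor (the stand-in for mutex_pause(0)), and take
 * it back; otherwise keep holding it.
 */
static void
mutex_yield_if_contended(pthread_mutex_t *m, bool have_waiters)
{
	if (!have_waiters)
		return;			/* no contention: keep the lock */

	pthread_mutex_unlock(m);
	sched_yield();
	pthread_mutex_lock(m);
}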
Example No. 7
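/*
 *	Routine:	ipc_mqueue_receive_on_thread
 *	Purpose:
 *		Receive a message on behalf of the given thread.  For a
 *		port set, search the preposted member ports for one with
 *		a pending message; for a single port, just check its
 *		message queue.  If nothing is available, assert a wait
 *		on the mqueue.
 *	Returns:
 *		THREAD_NOT_WAITING if a message was selected (or a
 *		zero-length timeout expired immediately), otherwise the
 *		wait_result_t from asserting the wait.
 */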
wait_result_t
ipc_mqueue_receive_on_thread(
        ipc_mqueue_t            mqueue,
	mach_msg_option_t       option,
	mach_msg_size_t         max_size,
	mach_msg_timeout_t      rcv_timeout,
	int                     interruptible,
	thread_t                thread)
{
	ipc_kmsg_queue_t        kmsgs;
	wait_result_t           wresult;
	uint64_t		deadline;
	spl_t                   s;

	s = splsched();
	imq_lock(mqueue);
	
	if (imq_is_set(mqueue)) {
		queue_t q;

		q = &mqueue->imq_preposts;

		/*
		 * If we are waiting on a portset mqueue, we need to see if
		 * any of the member ports have work for us.  Ports that
		 * have (or recently had) messages will be linked in the
		 * prepost queue for the portset. By holding the portset's
		 * mqueue lock during the search, we tie up any attempts by
		 * mqueue_deliver or portset membership changes that may
		 * cross our path.
		 */
	search_set:
		while(!queue_empty(q)) {
			wait_queue_link_t wql;
			ipc_mqueue_t port_mq;

			queue_remove_first(q, wql, wait_queue_link_t, wql_preposts);
			assert(!wql_is_preposted(wql));

			/*
			 * This is a lock order violation, so we have to do it
			 * "softly," putting the link back on the prepost list
			 * if it fails (at the tail is fine since the order of
			 * handling messages from different sources in a set is
			 * not guaranteed and we'd like to skip to the next source
			 * if one is available).
			 */
			port_mq = (ipc_mqueue_t)wql->wql_queue;
			if (!imq_lock_try(port_mq)) {
				queue_enter(q, wql, wait_queue_link_t, wql_preposts);
				imq_unlock(mqueue);
				splx(s);
				mutex_pause(0);
				s = splsched();
				imq_lock(mqueue);
				goto search_set; /* start again at beginning - SMP */
			}

			/*
			 * If there are no messages on this queue, just skip it
			 * (we already removed the link from the set's prepost queue).
			 */
			kmsgs = &port_mq->imq_messages;
			if (ipc_kmsg_queue_first(kmsgs) == IKM_NULL) {
				imq_unlock(port_mq);
				continue;
			}

			/*
			 * There are messages, so reinsert the link back
			 * at the tail of the preposted queue (for fairness)
			 * while we still have the portset mqueue locked.
			 */
			queue_enter(q, wql, wait_queue_link_t, wql_preposts);
			imq_unlock(mqueue);

			/*
			 * Continue on to handling the message with just
			 * the port mqueue locked.
			 */
			ipc_mqueue_select_on_thread(port_mq, option, max_size, thread);
			imq_unlock(port_mq);
			splx(s);
			return THREAD_NOT_WAITING;
			
		}

	} else {

		/*
		 * Receive on a single port. Just try to get the messages.
		 */
	  	kmsgs = &mqueue->imq_messages;
		if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
			ipc_mqueue_select_on_thread(mqueue, option, max_size, thread);
			imq_unlock(mqueue);
			splx(s);
			return THREAD_NOT_WAITING;
		}
	}
	
	/*
	 * Looks like we'll have to block.  The mqueue we will
	 * block on (whether the set's or the local port's) is
	 * still locked.
	 */
	if (option & MACH_RCV_TIMEOUT) {
		if (rcv_timeout == 0) {
			imq_unlock(mqueue);
			splx(s);
			thread->ith_state = MACH_RCV_TIMED_OUT;
			return THREAD_NOT_WAITING;
		}
	}

	thread_lock(thread);
	thread->ith_state = MACH_RCV_IN_PROGRESS;
	thread->ith_option = option;
	thread->ith_msize = max_size;

	if (option & MACH_RCV_TIMEOUT)
		clock_interval_to_deadline(rcv_timeout, 1000*NSEC_PER_USEC, &deadline);
	else
		deadline = 0;

	wresult = wait_queue_assert_wait64_locked(&mqueue->imq_wait_queue,
						  IPC_MQUEUE_RECEIVE,
						  interruptible, 
						  TIMEOUT_URGENCY_USER_NORMAL,
						  deadline, 0,
						  thread);
	/* preposts should be detected above, not here */
	if (wresult == THREAD_AWAKENED)
		panic("ipc_mqueue_receive_on_thread: sleep walking");

	thread_unlock(thread);
	imq_unlock(mqueue);
	splx(s);
	return wresult;
}
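
The portset search above handles its lock-order violation "softly": when a member queue's lock cannot be taken, the link goes back on the prepost list, everything is dropped, and the scan restarts after a pause. The sketch below shows that requeue-and-retry shape over a generic list with hypothetical names (not the XNU wait-queue machinery); for brevity it pushes items back at the head of a singly linked list, where the kernel code re-enters the link at the tail for fairness.

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

struct member {
	struct member	*next;
	pthread_mutex_t	 lock;
	bool		 has_work;
};

struct set {
	pthread_mutex_t	 lock;
	struct member	*head;		/* candidates, analogous to imq_preposts */
};

/*
 * Pop candidates while holding the set lock; try-lock each member (the
 * lock-order violation), and on failure push it back, drop everything,
 * pause, and start the scan over.  Returns a locked member with work,
 * or NULL when the candidate list is exhausted.
 */
static struct member *
pick_member(struct set *s)
{
restart:
	pthread_mutex_lock(&s->lock);
	while (s->head != NULL) {
		struct member *m = s->head;
		s->head = m->next;

		if (pthread_mutex_trylock(&m->lock) != 0) {
			m->next = s->head;	/* put it back and retry later */
			s->head = m;
			pthread_mutex_unlock(&s->lock);
			usleep(100);		/* stand-in for mutex_pause(0) */
			goto restart;
		}
		if (!m->has_work) {		/* nothing here: skip this member */
			pthread_mutex_unlock(&m->lock);
			continue;
		}
		m->next = s->head;		/* keep it as a future candidate */
		s->head = m;
		pthread_mutex_unlock(&s->lock);
		return m;			/* member locked, set unlocked */
	}
	pthread_mutex_unlock(&s->lock);
	return NULL;
}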