Example #1
/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, ENOLCK if the lock request was aborted
 * (mtx_abort_ex), or the tsleep() return code on failure.  A tsleep()
 * error can only be returned if PCATCH is specified in the flags or a
 * non-zero timeout is supplied.
 */
static __inline int
__mtx_lock_ex(mtx_t *mtx, mtx_link_t *link, const char *ident, int flags, int to)
{
	u_int	lock;
	u_int	nlock;
	int	error;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
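			/*
			 * Lock is currently free, try to acquire it
			 * exclusively with a count of 1.
			 */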
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				error = 0;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
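			/*
			 * Already owned exclusively by us, this is a
			 * recursive acquisition.  Just bump the count;
			 * the assertion guards against count overflow.
			 */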
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				break;
			}
		} else {
			/*
			 * Lock is held by someone else.  We must acquire
			 * MTX_EXLINK before we can link our mtx_link
			 * structure into the mutex.
			 *
			 * We could clear MTX_EXLINK in our expected value
			 * of lock and let the cmpset spin until the bit
			 * frees up, but to avoid unnecessary cpu cache
			 * traffic we poll for the bit explicitly instead.
			 *
			 * Setting MTX_EXLINK in nlock means a successful
			 * cmpset leaves us owning MTX_EXLINK.
			 *
			 * Also set MTX_EXWANTED coincident with EXLINK, if
			 * not already set.
			 */
			thread_t td;

			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			td = curthread;
			/*lock &= ~MTX_EXLINK;*/
			nlock = lock | MTX_EXWANTED | MTX_EXLINK;
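			/*
			 * Enter a critical section so we cannot be
			 * preempted while holding MTX_EXLINK.
			 */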
			++td->td_critcount;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				/*
				 * Check for early abort
				 */
				if (link->state == MTX_LINK_ABORTED) {
					atomic_clear_int(&mtx->mtx_lock,
							 MTX_EXLINK);
					--td->td_critcount;
					error = ENOLCK;
					if (mtx->mtx_link == NULL) {
						atomic_clear_int(&mtx->mtx_lock,
								 MTX_EXWANTED);
					}
					break;
				}

				/*
				 * Success.  Link in our structure then
				 * release EXLINK and sleep.
				 */
				link->owner = td;
				link->state = MTX_LINK_LINKED;
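				/*
				 * Insert ourselves into the circular,
				 * doubly-linked list of waiters.
				 * mtx_link points at the head or is
				 * NULL when the list is empty.
				 */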
				if (mtx->mtx_link) {
					link->next = mtx->mtx_link;
					link->prev = link->next->prev;
					link->next->prev = link;
					link->prev->next = link;
				} else {
					link->next = link;
					link->prev = link;
					mtx->mtx_link = link;
				}
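				/*
				 * Arm the tsleep interlock before releasing
				 * EXLINK so a wakeup occurring in the window
				 * before we actually sleep is not lost.
				 */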
				tsleep_interlock(link, 0);
				atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
				--td->td_critcount;

				error = tsleep(link, flags | PINTERLOCKED,
					       ident, to);
				++mtx_contention_count;

				/*
				 * If we are still linked we were woken by
				 * a timeout, signal, or spurious wakeup
				 * and must unlink ourselves.  If the lock
				 * was handed off to us the state will be
				 * MTX_LINK_ACQUIRED.
				 */
				if (link->state == MTX_LINK_LINKED)
					mtx_delete_link(mtx, link);
				if (link->state == MTX_LINK_ACQUIRED) {
					KKASSERT(mtx->mtx_owner == link->owner);
					error = 0;
					break;
				}

				/*
				 * Aborted lock (mtx_abort_ex called).
				 */
				if (link->state == MTX_LINK_ABORTED) {
					error = ENOLCK;
					break;
				}

				/*
				 * A real tsleep() error (timeout or
				 * signal), else retry.
				 */
				if (error)
					break;
			} else {
				--td->td_critcount;
			}
		}
		++mtx_collision_count;
	}
	return (error);
}
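
For reference, the lock word manipulated above packs a count and several
flag bits into a single 32-bit integer.  Below is a minimal sketch of a
plausible layout and a caller, consistent with the code's use of
"MTX_EXCLUSIVE | 1" and MTX_MASK; the authoritative definitions and the
real entry points live in <sys/mutex.h> and may differ.

/*
 * Hypothetical lock-word layout (an assumption, not the authoritative
 * header): the high bits carry state flags and the low bits carry the
 * exclusive recursion count.
 */
#define MTX_EXCLUSIVE	0x80000000	/* held exclusively */
#define MTX_EXWANTED	0x20000000	/* exclusive waiter(s) queued */
#define MTX_EXLINK	0x10000000	/* waiter list being manipulated */
#define MTX_MASK	0x0FFFFFFF	/* recursion/share count field */

/*
 * Hypothetical caller: acquire the mutex exclusively, allowing signals
 * to interrupt the sleep and timing out after roughly one second.  The
 * mtx_link_t is caller-supplied and assumed to start out idle.
 */
static int
grab_lock_interruptible(mtx_t *mtx, mtx_link_t *link)
{
	int error;

	error = __mtx_lock_ex(mtx, link, "mylock", PCATCH, hz);
	if (error == ENOLCK)
		kprintf("exclusive lock request was aborted\n");
	return (error);
}
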
Example #2
/*
 * Wait for async lock completion or abort.  Returns ENOLCK if an abort
 * occurred.
 */
int
mtx_wait_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	indefinite_info_t info;
	int error;

	indefinite_init(&info, mtx->mtx_ident, 1,
			((link->state & MTX_LINK_LINKED_SH) ? 'm' : 'M'));

	/*
	 * Sleep.  Handle false wakeups, interruptions, etc.
	 * The link may also have been aborted.  The LINKED
	 * bit was set by this cpu so we can test it without
	 * fences.
	 */
	error = 0;
	while (link->state & MTX_LINK_LINKED) {
		tsleep_interlock(link, 0);
		cpu_lfence();
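		/*
		 * Re-test LINKED after arming the interlock; the lfence
		 * ensures the re-test loads link->state fresh rather
		 * than reusing a value read before the interlock.
		 */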
		if (link->state & MTX_LINK_LINKED) {
			error = tsleep(link, flags | PINTERLOCKED,
				       mtx->mtx_ident, to);
			if (error)
				break;
		}
		if ((mtx->mtx_flags & MTXF_NOCOLLSTATS) == 0)
			indefinite_check(&info);
	}

	/*
	 * We need at least an lfence (load fence) to ensure our cpu does not
	 * reorder loads (of data outside the lock structure) prior to the
	 * remote cpu's release, since the above test may have run without
	 * any atomic interactions.  The cpu_mfence() below is stronger than
	 * strictly necessary.
	 *
	 * If we do not do this then state updated by the other cpu before
	 * releasing its lock may not be read cleanly by our cpu when this
	 * function returns.  Even though the other cpu ordered its stores,
	 * our loads can still be out of order.
	 */
	cpu_mfence();

	/*
	 * We are done, make sure the link structure is unlinked.
	 * It may still be on the list due to e.g. EINTR or
	 * EWOULDBLOCK.
	 *
	 * tsleep() can be woken up for numerous reasons, so error may
	 * be zero in situations where we intend to return an error;
	 * in particular, the tsleep can race an ABORT.  The switch
	 * below makes the final determination.
	 *
	 * (This is the synchronous case so state cannot be CALLEDBACK.)
	 */
	switch (link->state) {
	case MTX_LINK_ACQUIRED:
	case MTX_LINK_CALLEDBACK:
		error = 0;
		break;
	case MTX_LINK_ABORTED:
		error = ENOLCK;
		break;
	case MTX_LINK_LINKED_EX:
	case MTX_LINK_LINKED_SH:
		mtx_delete_link(mtx, link);
		/* fall through */
	default:
		if (error == 0)
			error = EWOULDBLOCK;
		break;
	}

	/*
	 * Reset the link state now that the final status has been
	 * determined.
	 */
	link->state = MTX_LINK_IDLE;

	if ((mtx->mtx_flags & MTXF_NOCOLLSTATS) == 0)
		indefinite_done(&info);

	return error;
}
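
A typical synchronous caller pairs mtx_wait_link() with an asynchronous
lock attempt.  The sketch below is a minimal illustration under two
assumptions: that mtx_lock_ex_link() queues the request and returns
EINPROGRESS when the lock cannot be acquired immediately, and that
mtx_link_init() resets a link to its idle state.  Check the actual
mutex API in your tree before relying on either.

/*
 * Hypothetical usage sketch: start an exclusive-lock request that
 * another thread can abort (e.g. via mtx_abort_ex()), then wait for
 * it to complete.
 */
static int
acquire_abortable(mtx_t *mtx, mtx_link_t *link)
{
	int error;

	mtx_link_init(link);		/* assumed helper */
	error = mtx_lock_ex_link(mtx, link, PCATCH, 0);	/* assumed API */
	if (error == EINPROGRESS)
		error = mtx_wait_link(mtx, link, PCATCH, 0);
	return error;	/* 0, ENOLCK on abort, or a tsleep() code */
}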