Example #1: condvar_wait()
void condvar_wait(struct condvar *cv, struct mutex *m)
{
	uint32_t old_itr_status;
	struct wait_queue_elem wqe;

	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);

	/* Link this condvar to this mutex until reinitialized */
	cpu_spin_lock(&cv->spin_lock);
	TEE_ASSERT(!cv->m || cv->m == m);
	cv->m = m;
	cpu_spin_unlock(&cv->spin_lock);

	cpu_spin_lock(&m->spin_lock);

	/* Add to mutex wait queue as a condvar waiter */
	wq_wait_init_condvar(&m->wq, &wqe, cv);

	/* Unlock the mutex */
	TEE_ASSERT(m->value == MUTEX_VALUE_LOCKED);
	thread_rem_mutex(m);
	m->value = MUTEX_VALUE_UNLOCKED;

	cpu_spin_unlock(&m->spin_lock);

	thread_unmask_exceptions(old_itr_status);

	/* Wake eventual waiters */
	wq_wake_one(&m->wq);

	wq_wait_final(&m->wq, &wqe);

	mutex_lock(m);
}
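The wait side above relies on the usual condition-variable contract: the caller holds the mutex, condvar_wait() atomically parks the thread as a condvar waiter while releasing the mutex, and re-takes the mutex via mutex_lock() before returning. A minimal usage sketch of the resulting wait-in-a-loop pattern follows; the data_ready flag, the initializer macros and the condvar_signal() call on the producer side are illustrative assumptions, not taken from the listing above.

/*
 * Hedged usage sketch: consumer/producer pair built on the API used
 * above.  The names below are illustrative assumptions.
 */
static struct mutex lock = MUTEX_INITIALIZER;
static struct condvar cv = CONDVAR_INITIALIZER;
static bool data_ready;

static void consumer(void)
{
	mutex_lock(&lock);
	/* Re-check the predicate after every wakeup */
	while (!data_ready)
		condvar_wait(&cv, &lock);
	/* ... consume the data while still holding the mutex ... */
	mutex_unlock(&lock);
}

static void producer(void)
{
	mutex_lock(&lock);
	data_ready = true;
	condvar_signal(&cv);	/* wake one waiter queued by condvar_wait() */
	mutex_unlock(&lock);
}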
Example #2: __mutex_lock()
static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
{
	assert_have_no_spinlock();
	assert(thread_get_id_may_fail() != -1);
	assert(thread_is_in_normal_mode());

	while (true) {
		uint32_t old_itr_status;
		enum mutex_value old_value;
		struct wait_queue_elem wqe;
		int owner = MUTEX_OWNER_ID_NONE;

		/*
		 * If the mutex is locked we need to initialize the wqe
		 * before releasing the spinlock to guarantee that we don't
		 * miss the wakeup from mutex_unlock().
		 *
		 * If the mutex is unlocked we don't need to use the wqe at
		 * all.
		 */

		old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

		old_value = m->value;
		if (old_value == MUTEX_VALUE_LOCKED) {
			wq_wait_init(&m->wq, &wqe);
			owner = m->owner_id;
			assert(owner != thread_get_id_may_fail());
		} else {
			m->value = MUTEX_VALUE_LOCKED;
			thread_add_mutex(m);
		}

		cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

		if (old_value == MUTEX_VALUE_LOCKED) {
			/*
			 * Someone else is holding the lock, wait in normal
			 * world for the lock to become available.
			 */
			wq_wait_final(&m->wq, &wqe, m, owner, fname, lineno);
		} else
			return;
	}
}
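This variant threads fname and lineno down from the caller, presumably so a debugging wrapper can record where each lock was taken, and it tracks owner_id so the assert catches a thread trying to re-lock a mutex it already holds. A hedged sketch of how the file/line parameters could be filled in; the wrapper name and macro below are assumptions, not taken from the listing:

/* Hypothetical debug wrapper; names are illustrative assumptions. */
void mutex_lock_debug(struct mutex *m, const char *fname, int lineno)
{
	__mutex_lock(m, fname, lineno);
}

#define mutex_lock(m)	mutex_lock_debug((m), __FILE__, __LINE__)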
Example #3: __mutex_lock()
static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
{
	assert_have_no_spinlock();
	assert(thread_get_id_may_fail() != -1);
	assert(thread_is_in_normal_mode());

	mutex_lock_check(m);

	while (true) {
		uint32_t old_itr_status;
		bool can_lock;
		struct wait_queue_elem wqe;

		/*
		 * If the mutex is locked we need to initialize the wqe
		 * before releasing the spinlock to guarantee that we don't
		 * miss the wakeup from mutex_unlock().
		 *
		 * If the mutex is unlocked we don't need to use the wqe at
		 * all.
		 */

		old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

		can_lock = !m->state;
		if (!can_lock) {
			wq_wait_init(&m->wq, &wqe, false /* wait_read */);
		} else {
			m->state = -1; /* write locked */
		}

		cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

		if (!can_lock) {
			/*
			 * Someone else is holding the lock, wait in normal
			 * world for the lock to become available.
			 */
			wq_wait_final(&m->wq, &wqe, m, fname, lineno);
		} else
			return;
	}
}
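Compared with Example #2, this revision collapses value and owner_id into a single state field and passes a wait_read flag to wq_wait_init(), which suggests the same mutex can also be taken for shared (read) access: 0 means unlocked, -1 write-locked, and a positive value would count the readers. Below is a sketch of the read-lock fast path implied by that encoding; it is an illustration under that assumption, not code from the source.

/*
 * Illustrative sketch only: read-lock fast path under the assumed
 * state encoding (0 = unlocked, -1 = write locked, >0 = reader count).
 * The function name is an assumption.
 */
static bool try_read_lock(struct mutex *m)
{
	uint32_t old_itr_status;
	bool can_lock;

	old_itr_status = cpu_spin_lock_xsave(&m->spin_lock);

	can_lock = m->state >= 0;	/* no writer holds the mutex */
	if (can_lock)
		m->state++;		/* one more reader */

	cpu_spin_unlock_xrestore(&m->spin_lock, old_itr_status);

	return can_lock;
}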
Example #4: mutex_lock()
void mutex_lock(struct mutex *m)
{
	while (true) {
		uint32_t old_itr_status;
		enum mutex_value old_value;
		struct wait_queue_elem wqe;

		/*
		 * If the mutex is locked we need to initialize the wqe
		 * before releasing the spinlock to guarantee that we don't
		 * miss the wakeup from mutex_unlock().
		 *
		 * If the mutex is unlocked we don't need to use the wqe at
		 * all.
		 */

		old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
		cpu_spin_lock(&m->spin_lock);

		old_value = m->value;
		if (old_value == MUTEX_VALUE_LOCKED) {
			wq_wait_init(&m->wq, &wqe);
		} else {
			m->value = MUTEX_VALUE_LOCKED;
			thread_add_mutex(m);
		}

		cpu_spin_unlock(&m->spin_lock);
		thread_unmask_exceptions(old_itr_status);

		if (old_value == MUTEX_VALUE_LOCKED) {
			/*
			 * Someone else is holding the lock, wait in normal
			 * world for the lock to become available.
			 */
			wq_wait_final(&m->wq, &wqe);
		} else
			return;
	}
}
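The unlock counterpart of this early mutex_lock() is not listed, but Example #1 performs the same release sequence inline: assert the mutex is held, call thread_rem_mutex(), mark the mutex unlocked under the spinlock, then wake one waiter with wq_wake_one(). A hedged reconstruction assembled from that sequence (not quoted from the source):

/*
 * Hedged reconstruction of the matching unlock path, assembled from
 * the inline unlock sequence in Example #1.
 */
void mutex_unlock(struct mutex *m)
{
	uint32_t old_itr_status;

	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
	cpu_spin_lock(&m->spin_lock);

	/* Only a locked mutex may be released */
	TEE_ASSERT(m->value == MUTEX_VALUE_LOCKED);
	thread_rem_mutex(m);
	m->value = MUTEX_VALUE_UNLOCKED;

	cpu_spin_unlock(&m->spin_lock);
	thread_unmask_exceptions(old_itr_status);

	/* Wake one waiter queued by wq_wait_init() in mutex_lock() */
	wq_wake_one(&m->wq);
}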