/*
 * condvar_wait() - atomically release @m and wait on @cv, then re-acquire @m.
 *
 * The caller must hold @m (asserted below). The statement order is critical:
 * the wait-queue element is registered while the mutex spinlock is still
 * held, so a wakeup issued by condvar_signal()/mutex_unlock() after we drop
 * the spinlock cannot be missed.
 */
void condvar_wait(struct condvar *cv, struct mutex *m)
{
	uint32_t old_itr_status;
	struct wait_queue_elem wqe;

	/* Keep exceptions masked while any spinlock is held. */
	old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);

	/* Link this condvar to this mutex until reinitialized */
	cpu_spin_lock(&cv->spin_lock);
	/* A condvar may only ever be associated with one mutex at a time. */
	TEE_ASSERT(!cv->m || cv->m == m);
	cv->m = m;
	cpu_spin_unlock(&cv->spin_lock);

	cpu_spin_lock(&m->spin_lock);

	/* Add to mutex wait queue as a condvar waiter */
	wq_wait_init_condvar(&m->wq, &wqe, cv);

	/* Unlock the mutex */
	TEE_ASSERT(m->value == MUTEX_VALUE_LOCKED);
	thread_rem_mutex(m);
	m->value = MUTEX_VALUE_UNLOCKED;

	cpu_spin_unlock(&m->spin_lock);

	thread_unmask_exceptions(old_itr_status);

	/* Wake eventual waiters */
	wq_wake_one(&m->wq);

	/* Sleep until this wqe is signaled (we are already queued above). */
	wq_wait_final(&m->wq, &wqe);

	/* Re-acquire the mutex before returning, per condvar contract. */
	mutex_lock(m);
}
/*
 * cv_signal() - wake waiter(s) sleeping on @cv.
 * @only_one: promote a single waiter when true, all waiters when false.
 *
 * Takes a snapshot of the mutex linked to the condvar under the condvar
 * spinlock, then promotes the matching waiters in that mutex's wait queue.
 * If no mutex was ever linked there can be no waiters, so this is a no-op.
 */
static void cv_signal(struct condvar *cv, bool only_one)
{
	uint32_t exceptions;
	struct mutex *linked_mutex;

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	cpu_spin_lock(&cv->spin_lock);
	linked_mutex = cv->m;
	cpu_spin_unlock(&cv->spin_lock);
	thread_unmask_exceptions(exceptions);

	if (!linked_mutex)
		return;

	wq_promote_condvar(&linked_mutex->wq, cv, only_one);
}
/*
 * mutex_unlock() - release a mutex held by the current thread.
 *
 * Must be called with the mutex in MUTEX_VALUE_LOCKED state (asserted).
 * The wakeup is issued only after the spinlock is dropped and exceptions
 * are unmasked.
 */
void mutex_unlock(struct mutex *m)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&m->spin_lock);

	TEE_ASSERT(m->value == MUTEX_VALUE_LOCKED);
	/* Drop ownership bookkeeping, then mark the mutex free. */
	thread_rem_mutex(m);
	m->value = MUTEX_VALUE_UNLOCKED;

	cpu_spin_unlock(&m->spin_lock);
	thread_unmask_exceptions(exceptions);

	/* Let one queued waiter (if any) retry acquisition. */
	wq_wake_one(&m->wq);
}
/*
 * mutex_trylock() - attempt to acquire @m without blocking.
 *
 * Returns true if the mutex was free and is now held by the current
 * thread, false if it was already locked. Never waits.
 */
bool mutex_trylock(struct mutex *m)
{
	uint32_t exceptions;
	bool acquired;

	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	cpu_spin_lock(&m->spin_lock);

	acquired = (m->value == MUTEX_VALUE_UNLOCKED);
	if (acquired) {
		m->value = MUTEX_VALUE_LOCKED;
		/* Record ownership for the current thread. */
		thread_add_mutex(m);
	}

	cpu_spin_unlock(&m->spin_lock);
	thread_unmask_exceptions(exceptions);

	return acquired;
}
void mutex_lock(struct mutex *m) { while (true) { uint32_t old_itr_status; enum mutex_value old_value; struct wait_queue_elem wqe; /* * If the mutex is locked we need to initialize the wqe * before releasing the spinlock to guarantee that we don't * miss the wakeup from mutex_unlock(). * * If the mutex is unlocked we don't need to use the wqe at * all. */ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL); cpu_spin_lock(&m->spin_lock); old_value = m->value; if (old_value == MUTEX_VALUE_LOCKED) { wq_wait_init(&m->wq, &wqe); } else { m->value = MUTEX_VALUE_LOCKED; thread_add_mutex(m); } cpu_spin_unlock(&m->spin_lock); thread_unmask_exceptions(old_itr_status); if (old_value == MUTEX_VALUE_LOCKED) { /* * Someone else is holding the lock, wait in normal * world for the lock to become available. */ wq_wait_final(&m->wq, &wqe); } else return; } }
/*
 * lock_global() - take the file-wide global spinlock.
 *
 * NOTE(review): unlike the mutex/condvar paths above, this does not mask
 * exceptions before spinning — presumably callers invoke it with
 * exceptions already masked; confirm at the call sites.
 */
static void lock_global(void)
{
	cpu_spin_lock(&thread_global_lock);
}
/*
 * tee_l2cc_mutex_lock() - take the L2 cache controller mutex, if one has
 * been configured. When no mutex pointer has been set up this is a no-op.
 */
void tee_l2cc_mutex_lock(void)
{
	if (!l2cc_mutex)
		return;

	cpu_spin_lock(l2cc_mutex);
}