void condvar_wait(struct condvar *cv, struct mutex *m) { uint32_t old_itr_status; struct wait_queue_elem wqe; old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL); /* Link this condvar to this mutex until reinitialized */ cpu_spin_lock(&cv->spin_lock); TEE_ASSERT(!cv->m || cv->m == m); cv->m = m; cpu_spin_unlock(&cv->spin_lock); cpu_spin_lock(&m->spin_lock); /* Add to mutex wait queue as a condvar waiter */ wq_wait_init_condvar(&m->wq, &wqe, cv); /* Unlock the mutex */ TEE_ASSERT(m->value == MUTEX_VALUE_LOCKED); thread_rem_mutex(m); m->value = MUTEX_VALUE_UNLOCKED; cpu_spin_unlock(&m->spin_lock); thread_unmask_exceptions(old_itr_status); /* Wake eventual waiters */ wq_wake_one(&m->wq); wq_wait_final(&m->wq, &wqe); mutex_lock(m); }
/*
 * Translate virtual address @va to a physical address using the
 * hardware stage-1 address-translation instruction (AT), reading the
 * result back from the Physical Address Register (PAR).
 *
 * Returns true and stores the physical address in *pa on success,
 * false if the translation aborted (PAR fault bit set).
 */
static bool arm_va2pa_helper(void *va, paddr_t *pa)
{
	/*
	 * Mask all exceptions: PAR is per-core state, and an exception
	 * taken between the AT instruction and the PAR read could
	 * perform its own translation and clobber the result.
	 */
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
	paddr_t par;
	paddr_t par_pa_mask;
	bool ret = false;

#ifdef ARM32
	write_ats1cpr((vaddr_t)va);
	isb();	/* complete the translation before reading PAR */
#ifdef CFG_WITH_LPAE
	/* LPAE: 64-bit PAR layout */
	par = read_par64();
	par_pa_mask = PAR64_PA_MASK;
#else
	/* Short-descriptor format: 32-bit PAR layout */
	par = read_par32();
	par_pa_mask = PAR32_PA_MASK;
#endif
#endif /*ARM32*/

#ifdef ARM64
	write_at_s1e1r((vaddr_t)va);
	isb();	/* complete the translation before reading PAR_EL1 */
	par = read_par_el1();
	par_pa_mask = PAR_PA_MASK;
#endif

	/* PAR_F set means the translation aborted: no valid mapping */
	if (par & PAR_F)
		goto out;

	/*
	 * Combine the frame bits from PAR with the offset bits of @va.
	 * NOTE(review): "1 << PAR_PA_SHIFT" is plain int arithmetic;
	 * safe while PAR_PA_SHIFT is a page-offset shift (< 31), but a
	 * 64-bit constant would be more defensive — confirm the value
	 * of PAR_PA_SHIFT.
	 */
	*pa = (par & (par_pa_mask << PAR_PA_SHIFT)) |
	      ((vaddr_t)va & ((1 << PAR_PA_SHIFT) - 1));
	ret = true;
out:
	thread_unmask_exceptions(exceptions);
	return ret;
}
static void cv_signal(struct condvar *cv, bool only_one) { uint32_t old_itr_status; struct mutex *m; old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL); cpu_spin_lock(&cv->spin_lock); m = cv->m; cpu_spin_unlock(&cv->spin_lock); thread_unmask_exceptions(old_itr_status); if (m) wq_promote_condvar(&m->wq, cv, only_one); }
/*
 * Release @m, which must be held by the calling thread, then wake one
 * thread blocked in mutex_lock().
 */
void mutex_unlock(struct mutex *m)
{
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&m->spin_lock);

	/* Unlocking a mutex that is not locked is a hard error */
	TEE_ASSERT(m->value == MUTEX_VALUE_LOCKED);

	thread_rem_mutex(m);
	m->value = MUTEX_VALUE_UNLOCKED;

	cpu_spin_unlock(&m->spin_lock);
	thread_unmask_exceptions(exceptions);

	/* Hand the mutex over to one sleeping waiter, if any */
	wq_wake_one(&m->wq);
}
/*
 * Attempt to take @m without blocking.
 * Returns true if the mutex was acquired, false if it was already held.
 */
bool mutex_trylock(struct mutex *m)
{
	bool acquired = false;
	uint32_t exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);

	cpu_spin_lock(&m->spin_lock);
	if (m->value == MUTEX_VALUE_UNLOCKED) {
		/* Free: take it and record ownership for this thread */
		m->value = MUTEX_VALUE_LOCKED;
		thread_add_mutex(m);
		acquired = true;
	}
	cpu_spin_unlock(&m->spin_lock);

	thread_unmask_exceptions(exceptions);
	return acquired;
}
void mutex_lock(struct mutex *m) { while (true) { uint32_t old_itr_status; enum mutex_value old_value; struct wait_queue_elem wqe; /* * If the mutex is locked we need to initialize the wqe * before releasing the spinlock to guarantee that we don't * miss the wakeup from mutex_unlock(). * * If the mutex is unlocked we don't need to use the wqe at * all. */ old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL); cpu_spin_lock(&m->spin_lock); old_value = m->value; if (old_value == MUTEX_VALUE_LOCKED) { wq_wait_init(&m->wq, &wqe); } else { m->value = MUTEX_VALUE_LOCKED; thread_add_mutex(m); } cpu_spin_unlock(&m->spin_lock); thread_unmask_exceptions(old_itr_status); if (old_value == MUTEX_VALUE_LOCKED) { /* * Someone else is holding the lock, wait in normal * world for the lock to become available. */ wq_wait_final(&m->wq, &wqe); } else return; } }