inline void
nk_enqueue_thread_on_runq (nk_thread_t * t, int cpu)
{
    NK_PROFILE_ENTRY();

    nk_thread_queue_t * q = NULL;
    struct sys_info * sys = per_cpu_get(system);

    /* TODO: these checks should only occur at creation time */
    if (unlikely(cpu <= CPU_ANY || cpu >= sys->num_cpus)) {
        q = per_cpu_get(run_q);
    } else {
        q = sys->cpus[cpu]->run_q;
    }

    /* bail if the run queue hasn't been created yet */
    ASSERT(q);

    t->cur_run_q = q;
    t->status    = NK_THR_SUSPENDED;

    nk_enqueue_entry_atomic(q, &(t->runq_node));

    NK_PROFILE_EXIT();
}
int
nk_rwlock_wr_unlock_irq_restore (nk_rwlock_t * l, uint8_t flags)
{
    NK_PROFILE_ENTRY();
    DEBUG_PRINT("rwlock write unlock (irq): %p\n", (void*)l);
    spin_unlock_irq_restore(&l->lock, flags);
    NK_PROFILE_EXIT();
    return 0;
}
int
nk_rwlock_wr_unlock (nk_rwlock_t * l)
{
    NK_PROFILE_ENTRY();
    DEBUG_PRINT("rwlock write unlock: %p\n", (void*)l);
    spin_unlock(&l->lock);
    NK_PROFILE_EXIT();
    return 0;
}
int
nk_rwlock_rd_unlock (nk_rwlock_t * l)
{
    NK_PROFILE_ENTRY();
    DEBUG_PRINT("rwlock read unlock: %p\n", (void*)l);
    /* flags is a uint8_t to match spin_lock_irq_save's saved-flags type */
    uint8_t flags = spin_lock_irq_save(&l->lock);
    --l->readers;
    spin_unlock_irq_restore(&l->lock, flags);
    NK_PROFILE_EXIT();
    return 0;
}
int
nk_rwlock_wr_lock (nk_rwlock_t * l)
{
    NK_PROFILE_ENTRY();
    DEBUG_PRINT("rwlock write lock: %p\n", (void*)l);

    while (1) {
        spin_lock(&l->lock);
        if (likely(l->readers == 0)) {
            break;
        } else {
            spin_unlock(&l->lock);
            /* TODO: we should yield if we're not spread across cores */
        }
    }

    NK_PROFILE_EXIT();
    return 0;
}
uint8_t
nk_rwlock_wr_lock_irq_save (nk_rwlock_t * l)
{
    /* flags is a uint8_t to match the function's return type and the
     * argument expected by nk_rwlock_wr_unlock_irq_restore() */
    uint8_t flags;

    NK_PROFILE_ENTRY();
    DEBUG_PRINT("rwlock write lock (irq): %p\n", (void*)l);

    while (1) {
        flags = spin_lock_irq_save(&l->lock);
        if (likely(l->readers == 0)) {
            break;
        } else {
            spin_unlock_irq_restore(&l->lock, flags);
        }
    }

    NK_PROFILE_EXIT();
    return flags;
}
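/* A minimal usage sketch (not part of the source above): pairing the
 * IRQ-saving write lock with its matching unlock around an update to
 * shared state. The names 'demo_rwlock', 'shared_counter', and
 * 'demo_bump' are hypothetical, and the lock is assumed to have been
 * initialized elsewhere before use. */
static nk_rwlock_t demo_rwlock;
static unsigned long shared_counter;

static void
demo_bump (void)
{
    uint8_t flags = nk_rwlock_wr_lock_irq_save(&demo_rwlock);
    shared_counter++;
    nk_rwlock_wr_unlock_irq_restore(&demo_rwlock, flags);
}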