void mtx_lock(mtx_t *mtx) {
  /* TODO: Implement recursive mutexes */
  assert(mtx_owner(mtx) != thread_self());

  while (!mtx_try_lock(mtx)) {
    cs_enter();
    /* Check if the mutex got unlocked since the call to mtx_try_lock. */
    if (mtx->mtx_state == MTX_UNOWNED) {
      cs_leave();
      continue;
    }
    assert(mtx_owned(mtx));
    /* The mutex is still held, so block on its turnstile until the owner
     * signals it in mtx_unlock, then retry the acquisition. */
    turnstile_wait(&mtx->turnstile);
    cs_leave();
  }
}
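/*
 * Usage sketch (illustrative, not part of the listing above): a short
 * critical section built from mtx_lock/mtx_unlock. `counter_mtx`,
 * `counter` and `counter_inc` are hypothetical names, and the mutex is
 * assumed to be initialized to the unowned state elsewhere.
 */
static mtx_t counter_mtx;
static int counter;

static void counter_inc(void) {
  mtx_lock(&counter_mtx);   /* Blocks on the turnstile while another thread holds it. */
  counter++;
  mtx_unlock(&counter_mtx); /* Wakes up one waiter, if any. */
}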
void mtx_yield_unlock(mtx_yield_t *mtx) {
  assert(mtx);
  cs_enter();
  *mtx = 0;
  cs_leave();
}
void mtx_yield_lock(mtx_yield_t *mtx) {
  assert(mtx);
  /* Alternatively the lock could block instead of yielding:
   *   while (*mtx == 1)
   *     sleepq_wait(mtx);
   */
  while (true) {
    cs_enter();
    if (*mtx == 0) {
      /* The mutex is free: take it while still inside the critical section. */
      *mtx = 1;
      cs_leave();
      return;
    }
    /* The mutex is held by another thread: leave the critical section,
     * give up the CPU and retry once we are scheduled again. */
    cs_leave();
    sched_yield(true);
  }
}
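/*
 * Usage sketch (illustrative only): the yield-based mutex busy-waits by
 * giving up the CPU instead of sleeping on a turnstile, so it suits short
 * critical sections. `log_mtx`, `log_write` and `kprintf` are hypothetical.
 */
static mtx_yield_t log_mtx; /* 0 = unlocked, 1 = locked */

static void log_write(const char *msg) {
  mtx_yield_lock(&log_mtx);
  kprintf("%s\n", msg); /* hypothetical kernel printf */
  mtx_yield_unlock(&log_mtx);
}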
void sched_add(thread_t *td) {
  // log("Add '%s' {%p} thread to scheduler", td->td_name, td);
  if (td == PCPU_GET(idle_thread))
    return;

  td->td_state = TDS_READY;
  td->td_slice = SLICE;

  cs_enter();
  runq_add(&runq, td);
  /* Ask the running thread to yield if the newly added thread has
   * a higher priority. */
  if (td->td_prio > thread_self()->td_prio)
    thread_self()->td_flags |= TDF_NEEDSWITCH;
  cs_leave();
}
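/*
 * Sketch of a possible clock-tick hook (an assumption, not part of the
 * listing): it burns the running thread's slice and requests a switch once
 * the slice is used up; sched_switch later clears TDF_SLICEEND and
 * TDF_NEEDSWITCH when the switch actually happens.
 */
void sched_clock(void) {
  thread_t *td = thread_self();
  if (td == PCPU_GET(idle_thread))
    return;
  if (--td->td_slice <= 0)
    td->td_flags |= TDF_SLICEEND | TDF_NEEDSWITCH;
}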
void sched_switch(thread_t *newtd) {
  if (!sched_active)
    return;

  cs_enter();
  thread_t *td = thread_self();
  td->td_flags &= ~(TDF_SLICEEND | TDF_NEEDSWITCH);

  /* Put the current thread back on the run queue if it is still runnable. */
  if (td->td_state == TDS_RUNNING)
    sched_add(td);

  if (newtd == NULL)
    newtd = sched_choose();
  newtd->td_state = TDS_RUNNING;
  cs_leave();

  if (td != newtd)
    ctx_switch(td, newtd);
}
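/*
 * Sketch (assumed, not shown in the listing) of a preemption point that
 * consumes TDF_NEEDSWITCH: e.g. on the way back from an interrupt the
 * kernel checks the flag and lets sched_switch pick a successor.
 * `maybe_preempt` is a hypothetical name.
 */
static void maybe_preempt(void) {
  thread_t *td = thread_self();
  if (td->td_flags & TDF_NEEDSWITCH)
    sched_switch(NULL); /* NULL: let sched_choose() select the next thread */
}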
static void onexit_cleanup(void) {
  struct cs_status cs = cs_enter();
  cleanup();
  cs_leave(cs);
}
void mtx_unlock(mtx_t *mtx) {
  cs_enter();
  mtx->mtx_state = MTX_UNOWNED;
  /* Wake up a thread blocked in mtx_lock, if there is one. */
  turnstile_signal(&mtx->turnstile);
  cs_leave();
}
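/*
 * For reference, a minimal layout the lock/unlock paths above seem to
 * assume (a sketch only; the real mtx_t may differ, e.g. mtx_state could
 * encode the owner queried by mtx_owner/mtx_owned, and `turnstile_t` is
 * an assumed type name):
 */
struct mtx_sketch {
  volatile unsigned mtx_state; /* MTX_UNOWNED, or information about the owner */
  turnstile_t turnstile;       /* threads blocked in mtx_lock */
};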