/* Hook entered with the current stack pointer holding an SVC-style argument
 * frame: arg[0] = previous return value, arg[1] = mutex wait-queue id.
 * Apparently intended to (re)acquire the mutex on behalf of the running
 * thread: take the lock if free, otherwise enqueue the thread on the mutex
 * wait queue.
 *
 * NOTE(review): the `for(;;);` below is a debug halt — everything after it
 * (the actual lock/enqueue logic) is UNREACHABLE. Presumably this hook is
 * parked on purpose while being brought up; confirm before enabling. */
void thinkos_cond_hook(void)
{
	/* argument frame lives at the current stack pointer */
	int32_t * arg = (int32_t *)cm3_sp_get();
	unsigned int ret = arg[0];
	unsigned int mwq = arg[1];
	unsigned int mutex;
	/* link register captured for tracing only */
	uint32_t lr = cm3_lr_get();
	int self = thinkos_rt.active;
	int th = self;

	/* convert the wait-queue id to a mutex table index */
	mutex = mwq - THINKOS_MUTEX_BASE;

	/* silence unused-variable warnings while the code below is dead */
	(void)lr;
	(void)ret;
	(void)mwq;
	(void)mutex;

	DCC_LOG3(LOG_TRACE, "<%d> mutex=%d lr=0x%08x...", th, mwq, lr);

	/* debug trap: halts here, code below never runs */
	for(;;);

	/* -1 marks an unowned mutex — claim it for this thread */
	if (thinkos_rt.lock[mutex] == -1) {
		thinkos_rt.lock[mutex] = th;
		DCC_LOG2(LOG_TRACE, "<%d> mutex %d locked", th, mwq);
		return;
	}

	/* insert into the mutex wait queue */
	__thinkos_wq_insert(mwq, th);
	DCC_LOG2(LOG_TRACE , "<%d> waiting on mutex %d...", th, mwq);
}
/* SVC handler: wait on an event set.
 * arg[0] on entry: event-set wait-queue id; on exit: the consumed event
 * number when one was already pending, or THINKOS_EINVAL on a bad argument.
 * When no event is pending the calling thread is enqueued and removed from
 * the ready queue; the return value in arg[0] is presumably written later by
 * the signaling path on wakeup — TODO confirm against ev_raise. */
void thinkos_ev_wait_svc(int32_t * arg)
{
	unsigned int wq = arg[0];
	/* event-set index within the event table */
	unsigned int no = wq - THINKOS_EVENT_BASE;
	int self = thinkos_rt.active;
	unsigned int ev;

#if THINKOS_ENABLE_ARG_CHECK
	if (no >= THINKOS_EVENT_MAX) {
		DCC_LOG1(LOG_ERROR, "object %d is not an event set!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_EVENT_ALLOC
	/* reject event sets that were never allocated */
	if (__bit_mem_rd(&thinkos_rt.ev_alloc, no) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid event set %d!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

	/* disable interrupts: the pend/mask test and the queue updates below
	   must not race with an event raised from an ISR */
	cm3_cpsid_i();

	/* check for any pending unmasked event; __clz(__rbit(x)) yields the
	   lowest set bit position, or 32 when none is set */
	if ((ev = __clz(__rbit(thinkos_rt.ev[no].pend &
						   thinkos_rt.ev[no].mask))) < 32) {
		DCC_LOG2(LOG_MSG, "set=0x%08x msk=0x%08x",
				 thinkos_rt.ev[no].pend, thinkos_rt.ev[no].mask);
		/* consume the event: clear its pending bit */
		__bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 0);
		DCC_LOG2(LOG_INFO, "pending event %d.%d!", wq, ev);
		arg[0] = ev;
		cm3_cpsie_i();
		return;
	}

	/* insert into the wait queue */
	__thinkos_wq_insert(wq, self);

	/* wait for event */
	/* remove from the ready wait queue */
	__bit_mem_wr(&thinkos_rt.wq_ready, thinkos_rt.active, 0);
#if THINKOS_ENABLE_TIMESHARE
	/* if the ready queue is empty, collect
	   the threads from the CPU wait queue */
	if (thinkos_rt.wq_ready == 0) {
		thinkos_rt.wq_ready = thinkos_rt.wq_tmshare;
		thinkos_rt.wq_tmshare = 0;
	}
#endif
	cm3_cpsie_i();

	DCC_LOG2(LOG_INFO, "<%d> waiting for event %d.xx ...", self, wq);

	/* signal the scheduler ... */
	__thinkos_defer_sched();
}
/* SVC handler: wait (P operation) on a counting semaphore.
 * arg[0] on entry: semaphore wait-queue id; on exit: 0 on success (either
 * immediately, or after blocking and being woken), THINKOS_EINVAL on a bad
 * argument. Note arg[0] is set to 0 BEFORE possibly blocking, so the value
 * is already in place when the thread is resumed by sem_post. */
void thinkos_sem_wait_svc(int32_t * arg)
{
	unsigned int wq = arg[0];
	/* semaphore index within the semaphore table */
	unsigned int sem = wq - THINKOS_SEM_BASE;
	int self = thinkos_rt.active;

#if THINKOS_ENABLE_ARG_CHECK
	if (sem >= THINKOS_SEMAPHORE_MAX) {
		DCC_LOG1(LOG_ERROR, "object %d is not a semaphore!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_SEM_ALLOC
	/* reject semaphores that were never allocated */
	if (__bit_mem_rd(thinkos_rt.sem_alloc, sem) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid semaphore %d!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

	/* success return value, set up-front (see header comment) */
	arg[0] = 0;

	/* avoid possible race condition on sem_val */
	/* this is only necessary in case we use the __uthread_sem_post() call
	   inside interrupt handlers */
	/* TODO: study the possibility of using exclusive access instead
	   of disabling interrupts. */
	cm3_cpsid_i();

	/* fast path: a token is available, take it and return */
	if (thinkos_rt.sem_val[sem] > 0) {
		thinkos_rt.sem_val[sem]--;
		/* reenable interrupts ... */
		cm3_cpsie_i();
		return;
	}

	/* insert into the semaphore wait queue */
	__thinkos_wq_insert(wq, self);
	DCC_LOG2(LOG_INFO, "<%d> waiting on semaphore %d...", self, wq);

	/* wait for event */
	/* remove from the ready wait queue */
	__bit_mem_wr(&thinkos_rt.wq_ready, self, 0);
#if THINKOS_ENABLE_TIMESHARE
	/* if the ready queue is empty, collect
	   the threads from the CPU wait queue */
	if (thinkos_rt.wq_ready == 0) {
		thinkos_rt.wq_ready = thinkos_rt.wq_tmshare;
		thinkos_rt.wq_tmshare = 0;
	}
#endif
	/* reenable interrupts ... */
	cm3_cpsie_i();

	/* signal the scheduler ... */
	__thinkos_defer_sched();
}
/* SVC handler: wait on a conditional variable, atomically releasing the
 * associated mutex.
 * arg[0]: conditional-variable wait-queue id; arg[1]: mutex wait-queue id.
 * On exit arg[0] is 0 on success, THINKOS_EINVAL on a bad object id, or
 * THINKOS_EPERM when the caller does not own the mutex.
 * The calling thread is queued on the cond and the mutex is handed directly
 * to the next waiter (or released if none). Re-acquisition of the mutex on
 * wakeup is NOT done here — per the broadcast handler's comment, the
 * user-space cond_wait wrapper calls mutex_lock after returning. */
void thinkos_cond_wait_svc(int32_t * arg)
{
	unsigned int cwq = arg[0];
	unsigned int mwq = arg[1];
	/* table indices derived from the wait-queue ids */
	unsigned int cond = cwq - THINKOS_COND_BASE;
	unsigned int mutex = mwq - THINKOS_MUTEX_BASE;
	int self = thinkos_rt.active;
	int th;

#if THINKOS_ENABLE_ARG_CHECK
	if (cond >= THINKOS_COND_MAX) {
		DCC_LOG1(LOG_ERROR, "invalid conditional variable %d!", cwq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
	if (mutex >= THINKOS_MUTEX_MAX) {
		DCC_LOG1(LOG_ERROR, "invalid mutex %d!", mwq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_COND_ALLOC
	if (__bit_mem_rd(thinkos_rt.cond_alloc, cond) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid conditional variable %d!", cwq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#if THINKOS_ENABLE_MUTEX_ALLOC
	if (__bit_mem_rd(thinkos_rt.mutex_alloc, mutex) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid mutex %d!", mwq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

	/* sanity check: avoid unlock the mutex by a thread that
	   does not own the lock */
	if (thinkos_rt.lock[mutex] != self) {
		DCC_LOG3(LOG_WARNING, "<%d> mutex %d is locked by <%d>",
				 self, mwq, thinkos_rt.lock[mutex]);
		arg[0] = THINKOS_EPERM;
		return;
	}

#if 0
	/* assign the mutex to be locked on wakeup */
	thinkos_rt.cond_mutex[cond] = mwq;
#endif

	/* insert into the cond wait queue */
	__thinkos_wq_insert(cwq, self);
	DCC_LOG3(LOG_INFO, "<%d> mutex %d unlocked, waiting on cond %d...",
			 self, mwq, cwq);

	/* check for threads wating on the mutex wait queue */
	if ((th = __thinkos_wq_head(mwq)) == THINKOS_THREAD_NULL) {
		/* no threads waiting on the lock, just release
		   the lock */
		DCC_LOG2(LOG_INFO, "<%d> mutex %d released", self, mwq);
		thinkos_rt.lock[mutex] = -1;
	} else {
		/* set the mutex ownership to the new thread */
		thinkos_rt.lock[mutex] = th;
		DCC_LOG2(LOG_INFO, "<%d> mutex %d locked", th, mwq);
		/* wakeup from the mutex wait queue */
		__thinkos_wakeup(mwq, th);
	}

	arg[0] = 0;

	/* wait for event */
	__thinkos_wait(self);
}
/* SVC handler: wake ALL threads waiting on a conditional variable.
 * arg[0] on entry: conditional-variable wait-queue id; on exit: 0 on
 * success or THINKOS_EINVAL on a bad object id.
 * Current implementation simply wakes every waiter; each woken thread's
 * user-space cond_wait/cond_timedwait wrapper re-acquires the mutex via
 * mutex_lock on return. The large `#if 0` section preserves the previous
 * design, where the kernel itself moved waiters onto the mutex queue. */
void thinkos_cond_broadcast_svc(int32_t * arg)
{
	unsigned int cwq = arg[0];
	/* conditional-variable index within the cond table */
	unsigned int cond = cwq - THINKOS_COND_BASE;
	int th;

#if THINKOS_ENABLE_ARG_CHECK
	if (cond >= THINKOS_COND_MAX) {
		DCC_LOG1(LOG_ERROR, "invalid conditional variable %d!", cwq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_COND_ALLOC
	if (__bit_mem_rd(thinkos_rt.cond_alloc, cond) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid conditional variable %d!", cwq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

	/* XXX: NEW experimental implementation: the cond_wait() and
	   cond_timedwait() user calls invoque the mutex_lock() before
	   returning */

	/* insert all remaining threads into mutex wait queue */
	if ((th = __thinkos_wq_head(cwq)) != THINKOS_THREAD_NULL) {
		DCC_LOG2(LOG_INFO, "<%d> wakeup from cond %d.", th, cwq);
		/* wakeup from the mutex wait queue */
		__thinkos_wakeup(cwq, th);
		/* insert all remaining threads into mutex wait queue */
		while ((th = __thinkos_wq_head(cwq)) != THINKOS_THREAD_NULL) {
			DCC_LOG2(LOG_INFO, "<%d> wakeup from cond %d.", th, cwq);
			__thinkos_wakeup(cwq, th);
		}
		/* signal the scheduler ... */
		__thinkos_defer_sched();
	}

#if 0
	/* previous implementation: kernel-side mutex hand-off, kept for
	   reference */
	unsigned int mwq;
	unsigned int mutex;

	/* get the mutex associated with the conditional variable */
	if ((th = __thinkos_wq_head(cwq)) == THINKOS_THREAD_NULL) {
		/* no threads waiting on the conditional variable. */
	} else {
		DCC_LOG2(LOG_INFO, "<%d> wakeup from cond %d.", th, cwq);
		/* remove from the conditional variable wait queue */
		__thinkos_wq_remove(cwq, th);
		/* get the mutex associated with the conditional variable */
		mwq = thinkos_rt.cond_mutex[cond];
		mutex = mwq - THINKOS_MUTEX_BASE;
		/* check whether the mutex is locked or not */
		if (thinkos_rt.lock[mutex] == -1) {
			/* no threads waiting on the lock... */
			/* set the mutex ownership to the new thread */
			thinkos_rt.lock[mutex] = th;
			DCC_LOG2(LOG_INFO, "<%d> mutex %d locked.", th, mwq);
#if THINKOS_ENABLE_THREAD_STAT
			/* update status */
			thinkos_rt.th_stat[th] = 0;
#endif
			/* insert the thread into ready queue */
			__bit_mem_wr(&thinkos_rt.wq_ready, th, 1);
			/* signal the scheduler ... */
			__thinkos_defer_sched();
		} else {
			/* insert into the mutex wait queue */
			__thinkos_wq_insert(mwq, th);
			DCC_LOG2(LOG_INFO, "<%d> waiting on mutex %d...", th, mwq);
		}

		/* insert all remaining threads into mutex wait queue */
		while ((th = __thinkos_wq_head(cwq)) != THINKOS_THREAD_NULL) {
			/* remove from the conditional variable wait queue */
			__thinkos_wq_remove(cwq, th);
			/* insert into mutex wait queue */
			__thinkos_wq_insert(mwq, th);
			DCC_LOG2(LOG_INFO, "<%d> waiting on mutex %d...", th, mwq);
		}
	}
#endif

	arg[0] = 0;
}
/* SVC handler: cancel a thread.
 * arg[0]: user thread number (1-based; 0 means the calling thread itself);
 * arg[1]: exit code delivered to the canceled thread. On exit arg[0] is 0
 * on success or THINKOS_EINVAL on a bad thread id.
 * The target is pulled off whatever queue it currently waits on and its
 * stacked context is redirected so that, when next scheduled, it runs
 * __thinkos_thread_exit(code). */
void thinkos_cancel_svc(int32_t * arg, int self)
{
	/* Internal thread ids start form 0 whereas user thread numbers
	   start form one ... */
	unsigned int thread = (unsigned int)arg[0];
	unsigned int thread_id;
	int code = arg[1];
	unsigned int wq;
	int stat;

	/* 0 selects the caller; otherwise translate user number -> internal id */
	if (thread == 0)
		thread_id = self;
	else
		thread_id = thread - 1;

#if THINKOS_ENABLE_ARG_CHECK
	if (thread_id >= THINKOS_THREADS_MAX) {
		DCC_LOG1(LOG_ERROR, "invalid thread %d!", thread_id);
		__thinkos_error(THINKOS_ERR_THREAD_INVALID);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_THREAD_ALLOC
	/* reject thread ids that were never allocated */
	if (__bit_mem_rd(thinkos_rt.th_alloc, thread_id) == 0) {
		__thinkos_error(THINKOS_ERR_THREAD_ALLOC);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

#if (THINKOS_ENABLE_THREAD_STAT == 0)
#error "thinkos_cancel() depends on THINKOS_ENABLE_THREAD_STAT"
#endif
	/* th_stat encodes the queue the thread waits on; `stat >> 1` is the
	   wait-queue index — presumably bit 0 carries a separate flag (clock?),
	   TODO confirm against the th_stat encoding */
	stat = thinkos_rt.th_stat[thread_id];
	/* remove from other wait queue including wq_ready */
	__bit_mem_wr(&thinkos_rt.wq_lst[stat >> 1], thread_id, 0);

#if THINKOS_ENABLE_JOIN
	/* insert into the canceled wait queue and wait for a
	   join call */
	wq = __wq_idx(&thinkos_rt.wq_canceled);
#else /* THINKOS_ENABLE_JOIN */
	/* if join is not enabled insert into the ready queue */
	wq = __wq_idx(&thinkos_rt.wq_ready);
#endif /* THINKOS_ENABLE_JOIN */
	__thinkos_wq_insert(wq, thread_id);

#if THINKOS_ENABLE_TIMESHARE
	/* possibly remove from the time share wait queue */
	__bit_mem_wr(&thinkos_rt.wq_tmshare, thread_id, 0);
#endif

#if THINKOS_ENABLE_CLOCK
	/* possibly remove from the time wait queue */
	__bit_mem_wr(&thinkos_rt.wq_clock, thread_id, 0);
#endif

	DCC_LOG3(LOG_TRACE, "<%d> cancel %d, with code %d!",
			 thinkos_rt.active, thread_id, code);

	/* redirect the thread's resume point: it will call
	   __thinkos_thread_exit with `code` in r0 when next scheduled */
	thinkos_rt.ctx[thread_id]->pc = (uint32_t)__thinkos_thread_exit;
	thinkos_rt.ctx[thread_id]->r0 = code;

	arg[0] = 0;
}