/* SVC handler: wait for an event on an event set.
 *
 * arg[0] (in):  event-set wait-queue object id.
 * arg[0] (out): THINKOS_EINVAL on a bad object id; otherwise the number
 *               of the consumed event (set either here, or later by the
 *               raise path when this thread is woken).
 *
 * If a pending unmasked event exists it is consumed immediately;
 * otherwise the calling thread is moved from the ready queue to the
 * event set's wait queue and the scheduler is invoked.
 */
void thinkos_ev_wait_svc(int32_t * arg)
{
	unsigned int wq = arg[0];
	unsigned int no = wq - THINKOS_EVENT_BASE;
	int self = thinkos_rt.active;
	unsigned int ev;

#if THINKOS_ENABLE_ARG_CHECK
	if (no >= THINKOS_EVENT_MAX) {
		DCC_LOG1(LOG_ERROR, "object %d is not an event set!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_EVENT_ALLOC
	/* reject event sets that were never allocated */
	if (__bit_mem_rd(&thinkos_rt.ev_alloc, no) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid event set %d!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

	/* Interrupts stay disabled while the pend/mask bitmaps are tested
	   and the thread is (possibly) taken off the ready queue, so the
	   check-then-block sequence is atomic w.r.t. event raisers. */
	cm3_cpsid_i();

	/* check for any pending unmasked event; __clz(__rbit(x)) yields the
	   index of the lowest set bit, or 32 when x == 0 */
	if ((ev = __clz(__rbit(thinkos_rt.ev[no].pend &
						   thinkos_rt.ev[no].mask))) < 32) {
		DCC_LOG2(LOG_MSG, "set=0x%08x msk=0x%08x",
				 thinkos_rt.ev[no].pend, thinkos_rt.ev[no].mask);
		/* consume the pending event and return it right away */
		__bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 0);
		DCC_LOG2(LOG_INFO, "pending event %d.%d!", wq, ev);
		arg[0] = ev;
		cm3_cpsie_i();
		return;
	}

	/* insert into the wait queue */
	__thinkos_wq_insert(wq, self);

	/* wait for event */
	/* remove from the ready wait queue */
	__bit_mem_wr(&thinkos_rt.wq_ready, thinkos_rt.active, 0);

#if THINKOS_ENABLE_TIMESHARE
	/* if the ready queue is empty, collect
	   the threads from the CPU wait queue */
	if (thinkos_rt.wq_ready == 0) {
		thinkos_rt.wq_ready = thinkos_rt.wq_tmshare;
		thinkos_rt.wq_tmshare = 0;
	}
#endif

	cm3_cpsie_i();

	DCC_LOG2(LOG_INFO, "<%d> waiting for event %d.xx ...", self, wq);

	/* signal the scheduler ... */
	__thinkos_defer_sched();
}
void thinkos_sem_init_svc(int32_t * arg) { unsigned int wq = arg[0]; unsigned int sem = wq - THINKOS_SEM_BASE; uint32_t value = (uint32_t)arg[1]; #if THINKOS_ENABLE_ARG_CHECK if (sem >= THINKOS_SEMAPHORE_MAX) { DCC_LOG1(LOG_ERROR, "object %d is not a semaphore!", wq); arg[0] = THINKOS_EINVAL; return; } #if THINKOS_ENABLE_SEM_ALLOC if (__bit_mem_rd(thinkos_rt.sem_alloc, sem) == 0) { DCC_LOG1(LOG_ERROR, "invalid semaphore %d!", wq); arg[0] = THINKOS_EINVAL; return; } #endif #endif DCC_LOG2(LOG_TRACE, "sem[%d] <= %d", sem, value); thinkos_rt.sem_val[sem] = value; arg[0] = 0; }
/* SVC handler: wait on a semaphore with a millisecond timeout.
 *
 * arg[0] (in):  semaphore wait-queue object id.
 * arg[1] (in):  timeout in milliseconds.
 * arg[0] (out): 0 when the semaphore was taken, THINKOS_EINVAL on a bad
 *               object id, THINKOS_ETIMEDOUT if the timeout expires
 *               (the default set here; sem_post overwrites it with 0
 *               when it wakes this thread in time).
 */
void thinkos_sem_timedwait_svc(int32_t * arg)
{
	unsigned int wq = arg[0];
	unsigned int sem = wq - THINKOS_SEM_BASE;
	uint32_t ms = (uint32_t)arg[1];
	int self = thinkos_rt.active;

#if THINKOS_ENABLE_ARG_CHECK
	if (sem >= THINKOS_SEMAPHORE_MAX) {
		DCC_LOG1(LOG_ERROR, "object %d is not a semaphore!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_SEM_ALLOC
	if (__bit_mem_rd(thinkos_rt.sem_alloc, sem) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid semaphore %d!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

	/* avoid possible race condition on sem_val */
	/* this is only necessary in case we use the __uthread_sem_post()
	   call inside interrupt handlers */
	/* TODO: study the possibility of using exclusive access
	   instead of disabling interrupts. */
	cm3_cpsid_i();

	if (thinkos_rt.sem_val[sem] > 0) {
		/* counter available: take it and return success */
		thinkos_rt.sem_val[sem]--;
		arg[0] = 0;
	} else {
		/* insert into the semaphore wait queue, armed with the
		   ms timeout (clock wakeup path) */
		__thinkos_tmdwq_insert(wq, self, ms);
		DCC_LOG2(LOG_INFO, "<%d> waiting on semaphore %d...", self, wq);
		/* wait for event */
		/* remove from the ready wait queue */
		__bit_mem_wr(&thinkos_rt.wq_ready, self, 0);
#if THINKOS_ENABLE_TIMESHARE
		/* if the ready queue is empty, collect
		   the threads from the CPU wait queue */
		if (thinkos_rt.wq_ready == 0) {
			thinkos_rt.wq_ready = thinkos_rt.wq_tmshare;
			thinkos_rt.wq_tmshare = 0;
		}
#endif
		/* Set the default return value to timeout.
		   The sem_post call will change this to 0 */
		arg[0] = THINKOS_ETIMEDOUT;
	}

	/* reenable interrupts ... */
	cm3_cpsie_i();

	/* signal the scheduler ... */
	__thinkos_defer_sched();
}
/* SVC handler: raise (signal) one event in an event set.
 *
 * arg[0] (in):  event-set wait-queue object id.
 * arg[1] (in):  event number (0..31).
 * arg[0] (out): THINKOS_EINVAL on a bad object id or event number;
 *               otherwise left unchanged.
 *
 * If the event is unmasked and a thread is waiting, that thread is
 * woken and receives the event number as its return value; otherwise
 * the event is recorded in the pending bitmap for a later wait.
 *
 * NOTE(review): unlike the semaphore paths, this handler does not
 * disable interrupts around the mask test / pend update — presumably
 * it is only reached from a non-preemptible context; verify against
 * the svc dispatch priority. */
void thinkos_ev_raise_svc(int32_t * arg)
{
	unsigned int wq = arg[0];
	unsigned int ev = arg[1];
	unsigned int no = wq - THINKOS_EVENT_BASE;
	int th;

#if THINKOS_ENABLE_ARG_CHECK
	if (ev > 31) {
		DCC_LOG1(LOG_ERROR, "event %d is invalid!", ev);
		arg[0] = THINKOS_EINVAL;
		return;
	}
	if (no >= THINKOS_EVENT_MAX) {
		DCC_LOG1(LOG_ERROR, "object %d is not an event set!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_EVENT_ALLOC
	if (__bit_mem_rd(&thinkos_rt.ev_alloc, no) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid event set %d!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

	if ((__bit_mem_rd(&thinkos_rt.ev[no].mask, ev)) &&
		((th = __thinkos_wq_head(wq)) != THINKOS_THREAD_NULL)) {
		/* wakeup from the event wait queue, set the return of
		   the thread to the event */
		__thinkos_wakeup_return(wq, th, ev);
		DCC_LOG3(LOG_INFO, "<%d> waked up with event %d.%d", th, wq, ev);
		/* signal the scheduler ... */
		__thinkos_defer_sched();
	} else {
		/* event is masked or no thread is waiting on the event set,
		   set the event as pending */
		__bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 1);
		DCC_LOG2(LOG_INFO, "event %d.%d pendig...", wq, ev);
		DCC_LOG2(LOG_MSG, "set=0x%08x msk=0x%08x",
				 thinkos_rt.ev[no].pend, thinkos_rt.ev[no].mask);
	}
}
/* Report whether thread slot 'th' holds a live thread that is parked
   on the fault wait queue. Out-of-range ids and empty (NULL context)
   slots report false. */
bool __thinkos_thread_isfaulty(unsigned int th)
{
	/* range check first: only then is ctx[th] safe to index */
	bool live = (th < THINKOS_THREADS_MAX) && (thinkos_rt.ctx[th] != NULL);

	return live ? __bit_mem_rd(&thinkos_rt.wq_fault, th) : false;
}
void thinkos_sem_post_svc(int32_t * arg) { unsigned int wq = arg[0]; unsigned int sem = wq - THINKOS_SEM_BASE; int th; #if THINKOS_ENABLE_ARG_CHECK if (sem >= THINKOS_SEMAPHORE_MAX) { DCC_LOG1(LOG_ERROR, "object %d is not a semaphore!", wq); arg[0] = THINKOS_EINVAL; return; } #if THINKOS_ENABLE_SEM_ALLOC if (__bit_mem_rd(thinkos_rt.sem_alloc, sem) == 0) { DCC_LOG1(LOG_ERROR, "invalid semaphore %d!", wq); arg[0] = THINKOS_EINVAL; return; } #endif #endif arg[0] = 0; cm3_cpsid_i(); if ((th = __thinkos_wq_head(wq)) == THINKOS_THREAD_NULL) { /* no threads waiting on the semaphore, increment. */ thinkos_rt.sem_val[sem]++; } else { /* wakeup from the sem wait queue */ __thinkos_wakeup(wq, th); DCC_LOG2(LOG_INFO, "<%d> wakeup from sem %d ", th, wq); /* signal the scheduler ... */ __thinkos_defer_sched(); } cm3_cpsie_i(); }
void thinkos_sem_trywait_svc(int32_t * arg) { unsigned int wq = arg[0]; unsigned int sem = wq - THINKOS_SEM_BASE; #if THINKOS_ENABLE_ARG_CHECK if (sem >= THINKOS_SEMAPHORE_MAX) { DCC_LOG1(LOG_ERROR, "object %d is not a semaphore!", wq); arg[0] = THINKOS_EINVAL; return; } #if THINKOS_ENABLE_SEM_ALLOC if (__bit_mem_rd(thinkos_rt.sem_alloc, sem) == 0) { DCC_LOG1(LOG_ERROR, "invalid semaphore %d!", wq); arg[0] = THINKOS_EINVAL; return; } #endif #endif /* avoid possible race condition on sem_val */ /* this is only necessary in case we use the __uthread_sem_post() call inside interrupt handlers */ /* TODO: study the possibility of using exclusive access instead of disabling interrupts. */ cm3_cpsid_i(); if (thinkos_rt.sem_val[sem] > 0) { thinkos_rt.sem_val[sem]--; arg[0] = 0; } else { arg[0] = THINKOS_EAGAIN; } cm3_cpsie_i(); }
void thinkos_ev_mask_svc(int32_t * arg) { unsigned int wq = arg[0]; uint32_t mask = arg[1]; unsigned int no = wq - THINKOS_EVENT_BASE; #if THINKOS_ENABLE_ARG_CHECK if (no >= THINKOS_EVENT_MAX) { DCC_LOG1(LOG_ERROR, "object %d is not an event set!", wq); arg[0] = THINKOS_EINVAL; return; } #if THINKOS_ENABLE_EVENT_ALLOC if (__bit_mem_rd(&thinkos_rt.ev_alloc, no) == 0) { DCC_LOG1(LOG_ERROR, "invalid event set %d!", wq); arg[0] = THINKOS_EINVAL; return; } #endif #endif /* mask the events on the mask bitmap */ thinkos_rt.ev[no].mask &= ~mask; }
/* SVC handler: wait on a conditional variable, atomically releasing
 * the associated mutex.
 *
 * arg[0] (in):  conditional variable wait-queue object id.
 * arg[1] (in):  mutex wait-queue object id currently held by the caller.
 * arg[0] (out): 0 on success (set before blocking), THINKOS_EINVAL on a
 *               bad object id, THINKOS_EPERM if the caller does not own
 *               the mutex.
 *
 * The caller is queued on the conditional variable, then the mutex is
 * released — handed to the next waiter if there is one — and the caller
 * blocks. (Re-acquiring the mutex on wakeup is done by the user-side
 * wrapper, per the note in thinkos_cond_broadcast_svc.)
 */
void thinkos_cond_wait_svc(int32_t * arg)
{
	unsigned int cwq = arg[0];
	unsigned int mwq = arg[1];
	unsigned int cond = cwq - THINKOS_COND_BASE;
	unsigned int mutex = mwq - THINKOS_MUTEX_BASE;
	int self = thinkos_rt.active;
	int th;

#if THINKOS_ENABLE_ARG_CHECK
	if (cond >= THINKOS_COND_MAX) {
		DCC_LOG1(LOG_ERROR, "invalid conditional variable %d!", cwq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
	if (mutex >= THINKOS_MUTEX_MAX) {
		DCC_LOG1(LOG_ERROR, "invalid mutex %d!", mwq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_COND_ALLOC
	if (__bit_mem_rd(thinkos_rt.cond_alloc, cond) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid conditional variable %d!", cwq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#if THINKOS_ENABLE_MUTEX_ALLOC
	if (__bit_mem_rd(thinkos_rt.mutex_alloc, mutex) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid mutex %d!", mwq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

	/* sanity check: avoid unlock the mutex by a thread that
	   does not own the lock */
	if (thinkos_rt.lock[mutex] != self) {
		DCC_LOG3(LOG_WARNING, "<%d> mutex %d is locked by <%d>",
				 self, mwq, thinkos_rt.lock[mutex]);
		arg[0] = THINKOS_EPERM;
		return;
	}

#if 0
	/* assign the mutex to be locked on wakeup */
	thinkos_rt.cond_mutex[cond] = mwq;
#endif

	/* insert into the cond wait queue */
	__thinkos_wq_insert(cwq, self);

	DCC_LOG3(LOG_INFO, "<%d> mutex %d unlocked, waiting on cond %d...",
			 self, mwq, cwq);

	/* check for threads wating on the mutex wait queue */
	if ((th = __thinkos_wq_head(mwq)) == THINKOS_THREAD_NULL) {
		/* no threads waiting on the lock, just release the lock
		   (-1 marks the mutex as unowned) */
		DCC_LOG2(LOG_INFO, "<%d> mutex %d released", self, mwq);
		thinkos_rt.lock[mutex] = -1;
	} else {
		/* set the mutex ownership to the new thread */
		thinkos_rt.lock[mutex] = th;
		DCC_LOG2(LOG_INFO, "<%d> mutex %d locked", th, mwq);
		/* wakeup from the mutex wait queue */
		__thinkos_wakeup(mwq, th);
	}

	arg[0] = 0;

	/* wait for event */
	__thinkos_wait(self);
}
/* SVC handler: wake every thread waiting on a conditional variable.
 *
 * arg[0] (in):  conditional variable wait-queue object id.
 * arg[0] (out): 0 on success, THINKOS_EINVAL on a bad object id.
 *
 * Threads are simply made runnable; each one re-acquires the mutex in
 * user space (see the XXX note below). The old in-kernel mutex handoff
 * implementation is kept, disabled, under #if 0.
 */
void thinkos_cond_broadcast_svc(int32_t * arg)
{
	unsigned int cwq = arg[0];
	unsigned int cond = cwq - THINKOS_COND_BASE;
	int th;

#if THINKOS_ENABLE_ARG_CHECK
	if (cond >= THINKOS_COND_MAX) {
		DCC_LOG1(LOG_ERROR, "invalid conditional variable %d!", cwq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_COND_ALLOC
	if (__bit_mem_rd(thinkos_rt.cond_alloc, cond) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid conditional variable %d!", cwq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

	/* XXX: NEW experimental implementation: the cond_wait() and
	   cond_timedwait() user calls invoke the mutex_lock()
	   before returning */
	/* wake the first waiter, then drain the rest of the queue; the
	   scheduler is only pended when at least one thread was woken */
	if ((th = __thinkos_wq_head(cwq)) != THINKOS_THREAD_NULL) {
		DCC_LOG2(LOG_INFO, "<%d> wakeup from cond %d.", th, cwq);
		/* wakeup from the mutex wait queue */
		__thinkos_wakeup(cwq, th);
		/* insert all remaining threads into mutex wait queue */
		while ((th = __thinkos_wq_head(cwq)) != THINKOS_THREAD_NULL) {
			DCC_LOG2(LOG_INFO, "<%d> wakeup from cond %d.", th, cwq);
			__thinkos_wakeup(cwq, th);
		}
		/* signal the scheduler ... */
		__thinkos_defer_sched();
	}

#if 0
	/* dead code: previous implementation that transferred waiters to
	   the associated mutex wait queue inside the kernel */
	unsigned int mwq;
	unsigned int mutex;

	/* get the mutex associated with the conditional variable */
	if ((th = __thinkos_wq_head(cwq)) == THINKOS_THREAD_NULL) {
		/* no threads waiting on the conditional variable. */
	} else {
		DCC_LOG2(LOG_INFO, "<%d> wakeup from cond %d.", th, cwq);
		/* remove from the conditional variable wait queue */
		__thinkos_wq_remove(cwq, th);
		/* get the mutex associated with the conditional variable */
		mwq = thinkos_rt.cond_mutex[cond];
		mutex = mwq - THINKOS_MUTEX_BASE;
		/* check whether the mutex is locked or not */
		if (thinkos_rt.lock[mutex] == -1) {
			/* no threads waiting on the lock... */
			/* set the mutex ownership to the new thread */
			thinkos_rt.lock[mutex] = th;
			DCC_LOG2(LOG_INFO, "<%d> mutex %d locked.", th, mwq);
#if THINKOS_ENABLE_THREAD_STAT
			/* update status */
			thinkos_rt.th_stat[th] = 0;
#endif
			/* insert the thread into ready queue */
			__bit_mem_wr(&thinkos_rt.wq_ready, th, 1);
			/* signal the scheduler ... */
			__thinkos_defer_sched();
		} else {
			/* insert into the mutex wait queue */
			__thinkos_wq_insert(mwq, th);
			DCC_LOG2(LOG_INFO, "<%d> waiting on mutex %d...", th, mwq);
		}

		/* insert all remaining threads into mutex wait queue */
		while ((th = __thinkos_wq_head(cwq)) != THINKOS_THREAD_NULL) {
			/* remove from the conditional variable wait queue */
			__thinkos_wq_remove(cwq, th);
			/* insert into mutex wait queue */
			__thinkos_wq_insert(mwq, th);
			DCC_LOG2(LOG_INFO, "<%d> waiting on mutex %d...", th, mwq);
		}
	}
#endif

	arg[0] = 0;
}
/* SVC handler: cancel a thread, forcing it to run __thinkos_thread_exit
 * with a given exit code.
 *
 * arg[0] (in):  user thread number (1-based; 0 means the calling thread).
 * arg[1] (in):  exit code delivered to __thinkos_thread_exit via r0.
 * arg[0] (out): 0 on success, THINKOS_EINVAL on a bad thread id.
 *
 * The target is removed from whatever wait queue it occupies (decoded
 * from its th_stat word), moved to the canceled queue (or straight to
 * ready when JOIN is disabled), and its saved program counter is
 * patched so it exits on its next activation.
 */
void thinkos_cancel_svc(int32_t * arg, int self)
{
	/* Internal thread ids start form 0 whereas user
	   thread numbers start form one ... */
	unsigned int thread = (unsigned int)arg[0];
	unsigned int thread_id;
	int code = arg[1];
	unsigned int wq;
	int stat;

	/* thread 0 is shorthand for "myself" */
	if (thread == 0)
		thread_id = self;
	else
		thread_id = thread - 1;

#if THINKOS_ENABLE_ARG_CHECK
	if (thread_id >= THINKOS_THREADS_MAX) {
		DCC_LOG1(LOG_ERROR, "invalid thread %d!", thread_id);
		__thinkos_error(THINKOS_ERR_THREAD_INVALID);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_THREAD_ALLOC
	if (__bit_mem_rd(thinkos_rt.th_alloc, thread_id) == 0) {
		__thinkos_error(THINKOS_ERR_THREAD_ALLOC);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

#if (THINKOS_ENABLE_THREAD_STAT == 0)
#error "thinkos_cancel() depends on THINKOS_ENABLE_THREAD_STAT"
#endif
	stat = thinkos_rt.th_stat[thread_id];
	/* remove from other wait queue including wq_ready;
	   th_stat encodes the wait-queue index in its upper bits
	   (hence the >> 1) */
	__bit_mem_wr(&thinkos_rt.wq_lst[stat >> 1], thread_id, 0);

#if THINKOS_ENABLE_JOIN
	/* insert into the canceled wait queue and wait for a join call */
	wq = __wq_idx(&thinkos_rt.wq_canceled);
#else /* THINKOS_ENABLE_JOIN */
	/* if join is not enabled insert into the ready queue */
	wq = __wq_idx(&thinkos_rt.wq_ready);
#endif /* THINKOS_ENABLE_JOIN */

	__thinkos_wq_insert(wq, thread_id);

#if THINKOS_ENABLE_TIMESHARE
	/* possibly remove from the time share wait queue */
	__bit_mem_wr(&thinkos_rt.wq_tmshare, thread_id, 0);
#endif

#if THINKOS_ENABLE_CLOCK
	/* possibly remove from the time wait queue */
	__bit_mem_wr(&thinkos_rt.wq_clock, thread_id, 0);
#endif

	DCC_LOG3(LOG_TRACE, "<%d> cancel %d, with code %d!",
			 thinkos_rt.active, thread_id, code);

	/* patch the saved context: next time the thread runs it executes
	   __thinkos_thread_exit(code) */
	thinkos_rt.ctx[thread_id]->pc = (uint32_t)__thinkos_thread_exit;
	thinkos_rt.ctx[thread_id]->r0 = code;

	arg[0] = 0;
}
void thinkos_ev_unmask_svc(int32_t * arg) { unsigned int wq = arg[0]; uint32_t mask = arg[1]; unsigned int no = wq - THINKOS_EVENT_BASE; unsigned int ev; int th; #if THINKOS_ENABLE_ARG_CHECK if (no >= THINKOS_EVENT_MAX) { DCC_LOG1(LOG_ERROR, "object %d is not an event set!", wq); arg[0] = THINKOS_EINVAL; return; } #if THINKOS_ENABLE_EVENT_ALLOC if (__bit_mem_rd(&thinkos_rt.ev_alloc, no) == 0) { DCC_LOG1(LOG_ERROR, "invalid event set %d!", wq); arg[0] = THINKOS_EINVAL; return; } #endif #endif cm3_cpsid_i(); /* unmask the events on the mask bitmap */ thinkos_rt.ev[no].mask |= mask; /* wake up the first unmasked thread if any. */ if ((ev = __clz(__rbit(thinkos_rt.ev[no].pend & mask))) < 32) { if ((th = __thinkos_wq_head(wq)) != THINKOS_THREAD_NULL) { /* a pending event was unmaksed and there is a thread waiting on the queue, clear the event pending flag and wakes up the thread. */ __bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 0); /* wakeup from the event wait queue, set the return of the thread to the event */ __thinkos_wakeup_return(wq, th, ev); DCC_LOG3(LOG_TRACE, "<%d> waked up with event %d.%d", th, wq, ev); /* signal the scheduler ... */ __thinkos_defer_sched(); } else { /* no threads waiting */ cm3_cpsie_i(); return; } } /* wake up as many other threads as possible */ while ((ev = __clz(__rbit(thinkos_rt.ev[no].pend & mask))) < 32) { if ((th = __thinkos_wq_head(wq)) != THINKOS_THREAD_NULL) { /* a pending event was unmaksed and there is a thread waiting on the queue, clear the event pending flag and wakes up the thread. */ __bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 0); /* wakeup from the event wait queue, set the return of the thread to the event */ __thinkos_wakeup_return(wq, th, ev); DCC_LOG3(LOG_TRACE, "<%d> waked up with event %d.%d", th, wq, ev); } else { /* no more threads waiting */ break; } } cm3_cpsie_i(); }
/* SVC handler: wait for an event on an event set with a millisecond
 * timeout.
 *
 * arg[0] (in):  event-set wait-queue object id.
 * arg[1] (in):  timeout in milliseconds.
 * arg[0] (out): THINKOS_EINVAL on a bad object id; the consumed event
 *               number on success; THINKOS_ETIMEDOUT if the timeout
 *               expires (default set here, overwritten by the raise
 *               path when this thread is woken in time).
 */
void thinkos_ev_timedwait_svc(int32_t * arg)
{
	unsigned int wq = arg[0];
	uint32_t ms = (uint32_t)arg[1];
	unsigned int no = wq - THINKOS_EVENT_BASE;
	int self = thinkos_rt.active;
	unsigned int ev;

#if THINKOS_ENABLE_ARG_CHECK
	if (no >= THINKOS_EVENT_MAX) {
		DCC_LOG1(LOG_ERROR, "object %d is not an event set!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_EVENT_ALLOC
	if (__bit_mem_rd(&thinkos_rt.ev_alloc, no) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid event set %d!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

	/* interrupts disabled: the pend/mask test and the transition off
	   the ready queue must be atomic w.r.t. event raisers */
	cm3_cpsid_i();

	/* check for any pending unmasked event; __clz(__rbit(x)) yields
	   the lowest set bit of x, or 32 when x == 0 */
	if ((ev = __clz(__rbit(thinkos_rt.ev[no].pend &
						   thinkos_rt.ev[no].mask))) < 32) {
		/* consume it and return immediately */
		__bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 0);
		arg[0] = ev;
		cm3_cpsie_i();
		return;
	}

	/* insert into the event timed wait queue (armed with the
	   ms timeout for the clock wakeup path) */
	__thinkos_tmdwq_insert(wq, self, ms);

	/* wait for event */
	/* remove from the ready wait queue */
	__bit_mem_wr(&thinkos_rt.wq_ready, thinkos_rt.active, 0);

#if THINKOS_ENABLE_TIMESHARE
	/* if the ready queue is empty, collect
	   the threads from the CPU wait queue */
	if (thinkos_rt.wq_ready == 0) {
		thinkos_rt.wq_ready = thinkos_rt.wq_tmshare;
		thinkos_rt.wq_tmshare = 0;
	}
#endif

	/* Set the default return value to timeout. The ev_raise()
	   call will change it to the active event */
	arg[0] = THINKOS_ETIMEDOUT;

	cm3_cpsie_i();

	DCC_LOG2(LOG_INFO, "<%d> waiting for event %d...", self, wq);

	/* signal the scheduler ... */
	__thinkos_defer_sched();
}