void thinkos_ev_wait_svc(int32_t * arg)
{
    unsigned int wq = arg[0];
    unsigned int no = wq - THINKOS_EVENT_BASE;
    int self = thinkos_rt.active;
    unsigned int ev;

#if THINKOS_ENABLE_ARG_CHECK
    if (no >= THINKOS_EVENT_MAX) {
        DCC_LOG1(LOG_ERROR, "object %d is not an event set!", wq);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#if THINKOS_ENABLE_EVENT_ALLOC
    if (__bit_mem_rd(&thinkos_rt.ev_alloc, no) == 0) {
        DCC_LOG1(LOG_ERROR, "invalid event set %d!", wq);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#endif
#endif

    cm3_cpsid_i();

    /* check for any pending unmasked event */
    if ((ev = __clz(__rbit(thinkos_rt.ev[no].pend &
                           thinkos_rt.ev[no].mask))) < 32) {
        DCC_LOG2(LOG_MSG, "set=0x%08x msk=0x%08x",
                 thinkos_rt.ev[no].pend, thinkos_rt.ev[no].mask);
        __bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 0);
        DCC_LOG2(LOG_INFO, "pending event %d.%d!", wq, ev);
        arg[0] = ev;
        cm3_cpsie_i();
        return;
    }

    /* insert into the event wait queue */
    __thinkos_wq_insert(wq, self);

    /* wait for event: remove from the ready queue */
    __bit_mem_wr(&thinkos_rt.wq_ready, thinkos_rt.active, 0);

#if THINKOS_ENABLE_TIMESHARE
    /* if the ready queue is empty, collect
       the threads from the CPU wait queue */
    if (thinkos_rt.wq_ready == 0) {
        thinkos_rt.wq_ready = thinkos_rt.wq_tmshare;
        thinkos_rt.wq_tmshare = 0;
    }
#endif

    cm3_cpsie_i();

    DCC_LOG2(LOG_INFO, "<%d> waiting for event %d.xx ...", self, wq);

    /* signal the scheduler ... */
    __thinkos_defer_sched();
}
void thinkos_irq_wait_svc(int32_t * arg, int self)
{
    unsigned int irq = arg[0];

#if THINKOS_ENABLE_ARG_CHECK
    if (irq >= THINKOS_IRQ_MAX) {
        DCC_LOG1(LOG_ERROR, "invalid IRQ %d!", irq);
        __thinkos_error(THINKOS_ERR_IRQ_INVALID);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#endif

    /* clear any previously pending interrupt */
    cm3_irq_pend_clr(irq);

    /* wait for the interrupt: suspend the calling thread */
    __thinkos_suspend(self);

    /* store the thread id so the ISR can wake it up later */
    thinkos_rt.irq_th[irq] = self;

    /* signal the scheduler ... */
    __thinkos_defer_sched();

    /* enable this interrupt source */
    cm3_irq_enable(irq);
}
void thinkos_sem_timedwait_svc(int32_t * arg)
{
    unsigned int wq = arg[0];
    unsigned int sem = wq - THINKOS_SEM_BASE;
    uint32_t ms = (uint32_t)arg[1];
    int self = thinkos_rt.active;

#if THINKOS_ENABLE_ARG_CHECK
    if (sem >= THINKOS_SEMAPHORE_MAX) {
        DCC_LOG1(LOG_ERROR, "object %d is not a semaphore!", wq);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#if THINKOS_ENABLE_SEM_ALLOC
    if (__bit_mem_rd(thinkos_rt.sem_alloc, sem) == 0) {
        DCC_LOG1(LOG_ERROR, "invalid semaphore %d!", wq);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#endif
#endif

    /* avoid a possible race condition on sem_val. This is only necessary
       if the __uthread_sem_post() call is used inside interrupt handlers. */
    /* TODO: study the possibility of using exclusive access
       instead of disabling interrupts. */
    cm3_cpsid_i();

    if (thinkos_rt.sem_val[sem] > 0) {
        thinkos_rt.sem_val[sem]--;
        arg[0] = 0;
    } else {
        /* insert into the semaphore wait queue */
        __thinkos_tmdwq_insert(wq, self, ms);
        DCC_LOG2(LOG_INFO, "<%d> waiting on semaphore %d...", self, wq);

        /* wait for event: remove from the ready queue */
        __bit_mem_wr(&thinkos_rt.wq_ready, self, 0);

#if THINKOS_ENABLE_TIMESHARE
        /* if the ready queue is empty, collect
           the threads from the CPU wait queue */
        if (thinkos_rt.wq_ready == 0) {
            thinkos_rt.wq_ready = thinkos_rt.wq_tmshare;
            thinkos_rt.wq_tmshare = 0;
        }
#endif
        /* Set the default return value to timeout.
           The sem_post call will change this to 0. */
        arg[0] = THINKOS_ETIMEDOUT;
    }

    /* reenable interrupts ... */
    cm3_cpsie_i();

    /* signal the scheduler ... */
    __thinkos_defer_sched();
}
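/* Usage sketch (assumption, not part of the kernel code above): a consumer
   thread distinguishes a successful wait (return 0) from an expired timeout
   (THINKOS_ETIMEDOUT), which is the default return value set by
   thinkos_sem_timedwait_svc(). The user-API signature
   int thinkos_sem_timedwait(int sem, unsigned int ms) is assumed here. */
int consumer_task(void * arg)
{
    int sem = (int)arg;     /* semaphore handle passed at thread creation */

    for (;;) {
        if (thinkos_sem_timedwait(sem, 100) == THINKOS_ETIMEDOUT) {
            /* no producer activity for 100 ms: run housekeeping instead */
            continue;
        }
        /* ... the semaphore was posted, consume one item ... */
    }
    return 0;
}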
void cm3_default_isr(int irq)
{
    int th;

    /* disable this interrupt source */
    cm3_irq_disable(irq);

    th = thinkos_rt.irq_th[irq];

    /* insert the thread into the ready queue */
    __bit_mem_wr(&thinkos_rt.wq_ready, th, 1);

    /* signal the scheduler ... */
    __thinkos_defer_sched();
}
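/* Usage sketch (assumption, not from the sources above): a driver thread
   turns interrupt handling into a simple blocking loop. cm3_default_isr()
   disables the interrupt source and readies the thread; the next call to
   thinkos_irq_wait() clears the pending flag and re-enables the source.
   The user-API signature int thinkos_irq_wait(int irq) is assumed. */
int irq_service_task(void * arg)
{
    int irq = (int)arg;     /* IRQ number passed at thread creation */

    for (;;) {
        /* block here until the interrupt fires */
        thinkos_irq_wait(irq);
        /* ... service the device at thread level ... */
    }
    return 0;
}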
void thinkos_ev_raise_svc(int32_t * arg)
{
    unsigned int wq = arg[0];
    unsigned int ev = arg[1];
    unsigned int no = wq - THINKOS_EVENT_BASE;
    int th;

#if THINKOS_ENABLE_ARG_CHECK
    if (ev > 31) {
        DCC_LOG1(LOG_ERROR, "event %d is invalid!", ev);
        arg[0] = THINKOS_EINVAL;
        return;
    }
    if (no >= THINKOS_EVENT_MAX) {
        DCC_LOG1(LOG_ERROR, "object %d is not an event set!", wq);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#if THINKOS_ENABLE_EVENT_ALLOC
    if (__bit_mem_rd(&thinkos_rt.ev_alloc, no) == 0) {
        DCC_LOG1(LOG_ERROR, "invalid event set %d!", wq);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#endif
#endif

    if ((__bit_mem_rd(&thinkos_rt.ev[no].mask, ev)) &&
        ((th = __thinkos_wq_head(wq)) != THINKOS_THREAD_NULL)) {
        /* wake up the thread from the event wait queue, setting its
           return value to the event number */
        __thinkos_wakeup_return(wq, th, ev);
        DCC_LOG3(LOG_INFO, "<%d> woken up with event %d.%d", th, wq, ev);
        /* signal the scheduler ... */
        __thinkos_defer_sched();
    } else {
        /* the event is masked or no thread is waiting on the event set;
           mark the event as pending */
        __bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 1);
        DCC_LOG2(LOG_INFO, "event %d.%d pending...", wq, ev);
        DCC_LOG2(LOG_MSG, "set=0x%08x msk=0x%08x",
                 thinkos_rt.ev[no].pend, thinkos_rt.ev[no].mask);
    }
}
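/* Usage sketch (assumption, not part of the kernel code above): one context
   raises numbered events and a dispatcher thread waits for whichever event
   in the set is pending and unmasked, as implemented by thinkos_ev_wait_svc()
   and thinkos_ev_raise_svc() above. The user-API signatures
   int thinkos_ev_wait(int set) and int thinkos_ev_raise(int set, int ev)
   are assumed, as are the event numbers below. */
enum { EV_RX_DATA = 0, EV_TX_DONE = 1, EV_ERROR = 2 };

int dispatcher_task(void * arg)
{
    int evset = (int)arg;   /* event set handle passed at thread creation */

    for (;;) {
        /* returns the lowest-numbered pending, unmasked event */
        switch (thinkos_ev_wait(evset)) {
        case EV_RX_DATA:
            /* ... */
            break;
        case EV_TX_DONE:
            /* ... */
            break;
        case EV_ERROR:
            /* ... */
            break;
        }
    }
    return 0;
}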
void thinkos_idle_svc(int32_t * arg)
{
#if THINKOS_ENABLE_MONITOR
    dbgmon_signal_idle();
#endif

#if THINKOS_ENABLE_CRITICAL
    /* Force the scheduler to run if there are
       threads in the ready queue. */
#if ((THINKOS_THREADS_MAX) < 32)
    if (thinkos_rt.wq_ready != (1 << (THINKOS_THREADS_MAX)))
#else
    if (thinkos_rt.wq_ready != 0)
#endif
    {
        __thinkos_defer_sched();
    }
#endif
}
void thinkos_sem_post_svc(int32_t * arg)
{
    unsigned int wq = arg[0];
    unsigned int sem = wq - THINKOS_SEM_BASE;
    int th;

#if THINKOS_ENABLE_ARG_CHECK
    if (sem >= THINKOS_SEMAPHORE_MAX) {
        DCC_LOG1(LOG_ERROR, "object %d is not a semaphore!", wq);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#if THINKOS_ENABLE_SEM_ALLOC
    if (__bit_mem_rd(thinkos_rt.sem_alloc, sem) == 0) {
        DCC_LOG1(LOG_ERROR, "invalid semaphore %d!", wq);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#endif
#endif

    arg[0] = 0;

    cm3_cpsid_i();

    if ((th = __thinkos_wq_head(wq)) == THINKOS_THREAD_NULL) {
        /* no threads waiting on the semaphore, increment its value */
        thinkos_rt.sem_val[sem]++;
    } else {
        /* wake up the first thread from the semaphore wait queue */
        __thinkos_wakeup(wq, th);
        DCC_LOG2(LOG_INFO, "<%d> wakeup from sem %d", th, wq);
        /* signal the scheduler ... */
        __thinkos_defer_sched();
    }

    cm3_cpsie_i();
}
void thinkos_cond_broadcast_svc(int32_t * arg)
{
    unsigned int cwq = arg[0];
    unsigned int cond = cwq - THINKOS_COND_BASE;
    int th;

#if THINKOS_ENABLE_ARG_CHECK
    if (cond >= THINKOS_COND_MAX) {
        DCC_LOG1(LOG_ERROR, "invalid conditional variable %d!", cwq);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#if THINKOS_ENABLE_COND_ALLOC
    if (__bit_mem_rd(thinkos_rt.cond_alloc, cond) == 0) {
        DCC_LOG1(LOG_ERROR, "invalid conditional variable %d!", cwq);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#endif
#endif

    /* XXX: NEW experimental implementation: the cond_wait() and
       cond_timedwait() user calls invoke mutex_lock() before returning,
       so the woken threads re-acquire the mutex themselves. */
    if ((th = __thinkos_wq_head(cwq)) != THINKOS_THREAD_NULL) {
        DCC_LOG2(LOG_INFO, "<%d> wakeup from cond %d.", th, cwq);
        /* wake up the first thread from the conditional variable
           wait queue */
        __thinkos_wakeup(cwq, th);
        /* wake up all remaining threads */
        while ((th = __thinkos_wq_head(cwq)) != THINKOS_THREAD_NULL) {
            DCC_LOG2(LOG_INFO, "<%d> wakeup from cond %d.", th, cwq);
            __thinkos_wakeup(cwq, th);
        }
        /* signal the scheduler ... */
        __thinkos_defer_sched();
    }

#if 0
    unsigned int mwq;
    unsigned int mutex;

    /* get the mutex associated with the conditional variable */
    if ((th = __thinkos_wq_head(cwq)) == THINKOS_THREAD_NULL) {
        /* no threads waiting on the conditional variable. */
    } else {
        DCC_LOG2(LOG_INFO, "<%d> wakeup from cond %d.", th, cwq);
        /* remove from the conditional variable wait queue */
        __thinkos_wq_remove(cwq, th);
        /* get the mutex associated with the conditional variable */
        mwq = thinkos_rt.cond_mutex[cond];
        mutex = mwq - THINKOS_MUTEX_BASE;
        /* check whether the mutex is locked or not */
        if (thinkos_rt.lock[mutex] == -1) {
            /* no threads waiting on the lock... */
            /* set the mutex ownership to the new thread */
            thinkos_rt.lock[mutex] = th;
            DCC_LOG2(LOG_INFO, "<%d> mutex %d locked.", th, mwq);
#if THINKOS_ENABLE_THREAD_STAT
            /* update status */
            thinkos_rt.th_stat[th] = 0;
#endif
            /* insert the thread into the ready queue */
            __bit_mem_wr(&thinkos_rt.wq_ready, th, 1);
            /* signal the scheduler ... */
            __thinkos_defer_sched();
        } else {
            /* insert into the mutex wait queue */
            __thinkos_wq_insert(mwq, th);
            DCC_LOG2(LOG_INFO, "<%d> waiting on mutex %d...", th, mwq);
        }

        /* insert all remaining threads into the mutex wait queue */
        while ((th = __thinkos_wq_head(cwq)) != THINKOS_THREAD_NULL) {
            /* remove from the conditional variable wait queue */
            __thinkos_wq_remove(cwq, th);
            /* insert into the mutex wait queue */
            __thinkos_wq_insert(mwq, th);
            DCC_LOG2(LOG_INFO, "<%d> waiting on mutex %d...", th, mwq);
        }
    }
#endif

    arg[0] = 0;
}
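/* Usage sketch (assumption, based on the XXX note above): since the
   cond_wait()/cond_timedwait() user calls re-acquire the mutex before
   returning, a broadcast wakes every waiter and they then serialize on the
   mutex. The user-API names thinkos_mutex_lock(), thinkos_mutex_unlock(),
   thinkos_cond_wait() and thinkos_cond_broadcast() and their int-handle
   signatures are assumed. */
volatile int job_count;

int worker_task(int mtx, int cond)
{
    thinkos_mutex_lock(mtx);
    /* classic condition-variable pattern: re-test the predicate
       after every wakeup */
    while (job_count == 0)
        thinkos_cond_wait(cond, mtx);
    job_count--;
    thinkos_mutex_unlock(mtx);
    return 0;
}

void submit_jobs(int mtx, int cond, int n)
{
    thinkos_mutex_lock(mtx);
    job_count += n;
    thinkos_mutex_unlock(mtx);
    /* wake all workers; each one re-locks the mutex inside cond_wait() */
    thinkos_cond_broadcast(cond);
}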
/* initialize a thread context */
void thinkos_thread_create_svc(int32_t * arg)
{
    struct thinkos_thread_init * init = (struct thinkos_thread_init *)arg;
    /* Internal thread ids start from 0 whereas user thread
       numbers start from one ... */
    int target_id = init->opt.id - 1;
    int thread_id;
    uint32_t sp;

#if THINKOS_ENABLE_THREAD_ALLOC
    DCC_LOG1(LOG_INFO, "thinkos_rt.th_alloc=0x%08x", thinkos_rt.th_alloc[0]);

    if (target_id >= THINKOS_THREADS_MAX) {
        thread_id = thinkos_alloc_hi(thinkos_rt.th_alloc, THINKOS_THREADS_MAX);
        DCC_LOG2(LOG_INFO, "thinkos_alloc_hi() %d -> %d.",
                 target_id, thread_id);
    } else {
        /* Look for the next available slot */
        if (target_id < 0)
            target_id = 0;
        thread_id = thinkos_alloc_lo(thinkos_rt.th_alloc, target_id);
        DCC_LOG2(LOG_INFO, "thinkos_alloc_lo() %d -> %d.",
                 target_id, thread_id);
        if (thread_id < 0) {
            thread_id = thinkos_alloc_hi(thinkos_rt.th_alloc, target_id);
            DCC_LOG2(LOG_INFO, "thinkos_alloc_hi() %d -> %d.",
                     target_id, thread_id);
        }
    }

    if (thread_id < 0) {
        __thinkos_error(THINKOS_ERR_THREAD_ALLOC);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#else
    thread_id = target_id;
    if (thread_id >= THINKOS_THREADS_MAX) {
        __thinkos_error(THINKOS_ERR_THREAD_INVALID);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#endif

    sp = (uint32_t)init->stack_ptr + init->opt.stack_size;

#if THINKOS_ENABLE_SANITY_CHECK
    if (init->opt.stack_size < sizeof(struct thinkos_context)) {
        DCC_LOG1(LOG_INFO, "stack too small. size=%d", init->opt.stack_size);
        __thinkos_error(THINKOS_ERR_THREAD_SMALLSTACK);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#endif

#if THINKOS_ENABLE_STACK_INIT
    /* initialize the stack with a fill pattern */
    __thinkos_memset32(init->stack_ptr, 0xdeadbeef, init->opt.stack_size);
#endif

#if THINKOS_ENABLE_THREAD_INFO
    thinkos_rt.th_inf[thread_id] = init->inf;
#endif

#if THINKOS_ENABLE_TIMESHARE
    thinkos_rt.sched_pri[thread_id] = init->opt.priority;
    if (thinkos_rt.sched_pri[thread_id] > THINKOS_SCHED_LIMIT_MAX)
        thinkos_rt.sched_pri[thread_id] = THINKOS_SCHED_LIMIT_MAX;

    /* update the scheduler limit */
    if (thinkos_rt.sched_limit < thinkos_rt.sched_pri[thread_id]) {
        thinkos_rt.sched_limit = thinkos_rt.sched_pri[thread_id];
    }
    thinkos_rt.sched_val[thread_id] = thinkos_rt.sched_limit / 2;
#endif

    __thinkos_thread_init(thread_id, sp, init->task, init->arg);

#if THINKOS_ENABLE_PAUSE
    if (!init->opt.paused)
#endif
    {
        DCC_LOG(LOG_JABBER, "__thinkos_thread_resume()");
        __thinkos_thread_resume(thread_id);
        DCC_LOG(LOG_JABBER, "__thinkos_defer_sched()");
        __thinkos_defer_sched();
    }

    /* Internal thread ids start from 0 whereas user thread
       numbers start from one ... */
    arg[0] = thread_id + 1;
}
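/* Usage sketch (assumption, not from the code above): the user-side wrapper
   packs the task pointer, argument, stack and options into the
   thinkos_thread_init block consumed by the SVC handler. The wrapper
   signature thinkos_thread_create(task, arg, stack_ptr, opt), the
   THINKOS_OPT_PRIORITY()/THINKOS_OPT_ID() option macros and thinkos_sleep()
   are assumed. */
static uint32_t blink_stack[128];

int blink_task(void * arg)
{
    for (;;) {
        /* ... toggle an LED ... */
        thinkos_sleep(500);
    }
    return 0;
}

void start_blinker(void)
{
    /* user thread numbers start at one; id 0 lets the kernel pick a slot */
    int th = thinkos_thread_create(blink_task, NULL, blink_stack,
                                   sizeof(blink_stack) |
                                   THINKOS_OPT_PRIORITY(4) |
                                   THINKOS_OPT_ID(0));
    (void)th;
}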
void thinkos_ev_unmask_svc(int32_t * arg)
{
    unsigned int wq = arg[0];
    uint32_t mask = arg[1];
    unsigned int no = wq - THINKOS_EVENT_BASE;
    unsigned int ev;
    int th;

#if THINKOS_ENABLE_ARG_CHECK
    if (no >= THINKOS_EVENT_MAX) {
        DCC_LOG1(LOG_ERROR, "object %d is not an event set!", wq);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#if THINKOS_ENABLE_EVENT_ALLOC
    if (__bit_mem_rd(&thinkos_rt.ev_alloc, no) == 0) {
        DCC_LOG1(LOG_ERROR, "invalid event set %d!", wq);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#endif
#endif

    cm3_cpsid_i();

    /* unmask the events in the mask bitmap */
    thinkos_rt.ev[no].mask |= mask;

    /* wake up the first unmasked thread, if any. */
    if ((ev = __clz(__rbit(thinkos_rt.ev[no].pend & mask))) < 32) {
        if ((th = __thinkos_wq_head(wq)) != THINKOS_THREAD_NULL) {
            /* a pending event was unmasked and there is a thread waiting
               on the queue: clear the event pending flag and wake up
               the thread. */
            __bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 0);
            /* wake up the thread from the event wait queue, setting its
               return value to the event number */
            __thinkos_wakeup_return(wq, th, ev);
            DCC_LOG3(LOG_TRACE, "<%d> woken up with event %d.%d",
                     th, wq, ev);
            /* signal the scheduler ... */
            __thinkos_defer_sched();
        } else {
            /* no threads waiting */
            cm3_cpsie_i();
            return;
        }
    }

    /* wake up as many other threads as possible */
    while ((ev = __clz(__rbit(thinkos_rt.ev[no].pend & mask))) < 32) {
        if ((th = __thinkos_wq_head(wq)) != THINKOS_THREAD_NULL) {
            /* a pending event was unmasked and there is a thread waiting
               on the queue: clear the event pending flag and wake up
               the thread. */
            __bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 0);
            /* wake up the thread from the event wait queue, setting its
               return value to the event number */
            __thinkos_wakeup_return(wq, th, ev);
            DCC_LOG3(LOG_TRACE, "<%d> woken up with event %d.%d",
                     th, wq, ev);
        } else {
            /* no more threads waiting */
            break;
        }
    }

    cm3_cpsie_i();
}
void thinkos_ev_timedwait_svc(int32_t * arg)
{
    unsigned int wq = arg[0];
    uint32_t ms = (uint32_t)arg[1];
    unsigned int no = wq - THINKOS_EVENT_BASE;
    int self = thinkos_rt.active;
    unsigned int ev;

#if THINKOS_ENABLE_ARG_CHECK
    if (no >= THINKOS_EVENT_MAX) {
        DCC_LOG1(LOG_ERROR, "object %d is not an event set!", wq);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#if THINKOS_ENABLE_EVENT_ALLOC
    if (__bit_mem_rd(&thinkos_rt.ev_alloc, no) == 0) {
        DCC_LOG1(LOG_ERROR, "invalid event set %d!", wq);
        arg[0] = THINKOS_EINVAL;
        return;
    }
#endif
#endif

    cm3_cpsid_i();

    /* check for any pending unmasked event */
    if ((ev = __clz(__rbit(thinkos_rt.ev[no].pend &
                           thinkos_rt.ev[no].mask))) < 32) {
        __bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 0);
        arg[0] = ev;
        cm3_cpsie_i();
        return;
    }

    /* insert into the event wait queue with a timeout */
    __thinkos_tmdwq_insert(wq, self, ms);

    /* wait for event: remove from the ready queue */
    __bit_mem_wr(&thinkos_rt.wq_ready, thinkos_rt.active, 0);

#if THINKOS_ENABLE_TIMESHARE
    /* if the ready queue is empty, collect
       the threads from the CPU wait queue */
    if (thinkos_rt.wq_ready == 0) {
        thinkos_rt.wq_ready = thinkos_rt.wq_tmshare;
        thinkos_rt.wq_tmshare = 0;
    }
#endif

    /* Set the default return value to timeout. The ev_raise()
       call will change it to the active event. */
    arg[0] = THINKOS_ETIMEDOUT;

    cm3_cpsie_i();

    DCC_LOG2(LOG_INFO, "<%d> waiting for event %d...", self, wq);

    /* signal the scheduler ... */
    __thinkos_defer_sched();
}