/*
 * Allocate a block of at least `size` bytes from the variable-size
 * skub pools.
 *
 * Tries the smallest pool whose block size fits the request; if that
 * pool is exhausted, falls through to progressively larger pools.
 *
 * Returns a pointer to the block, or NULL when the request exceeds
 * the largest pool's block size or every candidate pool is empty.
 */
void *skub_alloc_sz(int size)
{
	int i;
	void *ret = NULL;

	if (size > skub_pools_var[ARRAY_NELEMS(skub_pools_var) - 1].sz) {
		/* BUGFIX: the format string has a %d conversion but no
		 * argument was passed — undefined behavior. Pass size. */
		outputf("alloc %d: too big for any pool", size);
		return NULL;
	}

	for (i = 0; i < ARRAY_NELEMS(skub_pools_var); i++) {
		if (skub_pools_var[i].sz < size)
			continue; /* blocks in this pool are too small */

		ret = skub_alloc_from_pool(&skub_pools_var[i]);
		if (ret)
			break;

		outputf("alloc %d: oom in %d, trying larger...",
			size, skub_pools_var[i].sz);
	}

#ifdef SKUB_SPEW
	/* NOTE(review): if every candidate pool was exhausted, i equals
	 * ARRAY_NELEMS(skub_pools_var) here and ret is NULL — the log
	 * line then reports a block index one past the last pool. */
	outputf("alloc sz %d -> used block %d @ %p", size, i, ret);
#endif
	return ret;
}
/* Initialize every skub pool: the variable-size pools first, then the
 * fixed-size pools. */
void COLD skub_init(void)
{
	int n;

	for (n = 0; n < ARRAY_NELEMS(skub_pools_var); n++)
		skub_init_pool(&skub_pools_var[n]);

	for (n = 0; n < ARRAY_NELEMS(skub_pools_fixed); n++)
		skub_init_pool(&skub_pools_fixed[n]);
}
/*
 * Copy the message for errnum into strerrbuf (at most buflen bytes,
 * always NUL-terminated when buflen > 0) and return strerrbuf.
 * Out-of-range error numbers fall back to entry 0 of sys_errlist.
 */
char *
strerror_l(int errnum, char *strerrbuf, size_t buflen)
{
	if (errnum < 1 || errnum >= ARRAY_NELEMS(sys_errlist))
		errnum = 0;

	if (buflen) {
		size_t msglen = strlen(sys_errlist[errnum]) + 1;

		if (msglen >= buflen) {
			/* Message doesn't fit: copy what we can and
			 * reserve the final byte for the terminator. */
			buflen -= 1;
			strerrbuf[buflen] = '\0';
		} else {
			/* Whole message (incl. NUL) fits; copy only
			 * that much. */
			buflen = msglen;
		}
	}
	return memcpy(strerrbuf, sys_errlist[errnum], buflen);
}
/* Return the message for errnum in a static buffer (not thread-safe). */
char *
strerror(int errnum)
{
	static char msgbuf[32];

	return strerror_l(errnum, msgbuf, sizeof(msgbuf));
}
/* Dispatch construction of one program header to the handler for its
 * p_type. Returns 0 for types we have no handler for. */
static int
elf_loader_construct_phdr(const struct vmem *as,
			  const Elf32_Phdr * elf_phdr,
			  const unsigned char *img, struct vmem *dst_as)
{
	static int (*const construct_phdr[]) (const struct vmem *,
					      const Elf32_Phdr *,
					      const unsigned char *,
					      struct vmem *) = {
		elf_loader_construct_phdr_null,
		elf_loader_construct_phdr_load
	};

	/* Sanity checks: type must index the table and have a handler. */
	if (elf_phdr->p_type >= ARRAY_NELEMS(construct_phdr))
		return 0;
	if (!construct_phdr[elf_phdr->p_type])
		return 0;

	return construct_phdr[elf_phdr->p_type] (as, elf_phdr, img, dst_as);
}
/* Return non-zero iff img begins with the four ELF magic bytes. */
int elf_loader_is_elf(const unsigned char *img)
{
	static const char magic[4] = { ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3 };

	return memcmp(img, magic, sizeof(magic)) == 0;
}
/** * \brief select a runnable thread * \return the tcb of a runnable thread, or NULL otherwise * \internal */ static struct tcb* sched_select_thread(void) { const struct list* min_prio = g_thread; const struct list* cur_prio = min_prio + ARRAY_NELEMS(g_thread); while (cur_prio > min_prio) { --cur_prio; struct list* listhead = list_begin(cur_prio); while (listhead != list_end(cur_prio)) { struct tcb* tcb = tcb_of_sched_list(listhead); if (tcb_is_runnable(tcb)) { /* found runnable thread with highest prio */ return tcb; } listhead = list_next(listhead); } } /* there should always be an idle thread runnable */ assert_never_reach(); return NULL; }
/**
 * \brief switch to a specific thread
 * \param cpu the CPU on which the thread is running
 * \param[in] next the tcb of the destination thread
 * \return 0 on success, or a negative error code otherwise
 *
 * \attention The destination thread has to be in runnable state.
 */
int sched_switch_to(unsigned int cpu, struct tcb* next)
{
	assert(cpu < ARRAY_NELEMS(g_current_thread));
	assert(next && tcb_is_runnable(next));

	/* Rotate the destination thread to the tail of the scheduler's
	 * ready list so it isn't selected again immediately. */
	move_thread_to_back(next);

	struct tcb* prev = g_current_thread[cpu];

	if (next == prev) {
		/* Switching to the running thread is a no-op. */
		return 0;
	}

	/* Publish the new thread as current _before_ tcb_switch();
	 * otherwise the new thread wouldn't know its own TCB. */
	g_current_thread[cpu] = next;

	int res = tcb_switch(prev, next);
	if (res < 0) {
		/* The switch failed; make ourselves current again. */
		g_current_thread[cpu] = prev;
		return res;
	}
	return 0;
}
/**
 * \brief return the thread that is currently scheduled on the CPU
 * \param cpu the CPU on which the thread is running
 * \return the currently scheduled thread's tcb structure
 */
struct tcb*
sched_get_current_thread(unsigned int cpu)
{
	assert(cpu < ARRAY_NELEMS(g_current_thread));

	struct tcb* tcb = g_current_thread[cpu];
	return tcb;
}
/*
 * Return a variable-size block to its pool.
 *
 * Locates the pool whose backing storage contains ptr, derives the
 * block index from the byte offset, and marks the block free in the
 * pool's bitmask. Panics if ptr lies in no variable-size pool.
 */
void skub_free_sz(void *ptr)
{
	/* Figure out which block this was allocated in. */
	int i;

	for (i = 0; i < ARRAY_NELEMS(skub_pools_var); i++) {
		const struct skub_pool_info *pool = &skub_pools_var[i];
		/* BUGFIX: do pointer arithmetic via uintptr_t; the old
		 * (uint32_t) casts truncated addresses on 64-bit hosts
		 * (e.g. PC_BUILD). */
		intptr_t offset = (intptr_t) ((uintptr_t) ptr -
					      (uintptr_t) pool->pool);

		if (offset < 0 || offset >= (intptr_t) pool->sz * pool->max)
			continue;

		/* Pointers handed out by the allocator are always
		 * block-aligned within the pool. */
		ASSERT_EQUAL(offset % pool->sz, 0);

		int idx = (int) (offset / pool->sz);

		/* Bit-band writes are atomic. */
#ifdef PC_BUILD
		/* BUGFIX: 1u avoids UB from left-shifting a signed 1
		 * into the sign bit when idx % 32 == 31. */
		pool->bitmask[idx / 32] |= (1u << (idx % 32));
#else
		*(uint32_t *) BITBAND_SRAM((uint32_t) pool->bitmask, idx) = 1;
#endif
		return;
	}
	panic("skub_free_sz: %p not in any pool", ptr);
}
/* Set up IRQ handling: one empty handler list per IRQ line, plus the
 * platform's enable/disable callbacks. */
void init_irq_handling(int (*enable_irq)(unsigned char),
                       void (*disable_irq)(unsigned char))
{
	for (size_t i = 0; i < ARRAY_NELEMS(g_irq_handling.irqh); ++i) {
		list_init_head(g_irq_handling.irqh + i);
	}

	g_irq_handling.enable_irq = enable_irq;
	g_irq_handling.disable_irq = disable_irq;
}
/** * \brief init scheduler * \param[in] idle the initial idle thread * \return 0 on success, or a negative error code otherwise * * This initializes the scheduler. The passed thread is the idle * thread. It is added to the thread list automatically. */ int sched_init(struct tcb* idle) { assert(idle); for (size_t i = 0; i < ARRAY_NELEMS(g_current_thread); ++i) { g_current_thread[i] = idle; } for (size_t i = 0; i < ARRAY_NELEMS(g_thread); ++i) { list_init_head(g_thread + i); } alarm_init(&g_alarm, alarm_handler); int res = timer_add_alarm(&g_alarm, sched_timeout()); if (res < 0) { goto err_timer_add_alarm; } res = sched_add_thread(idle, 0); if (res < 0) { goto err_sched_add_thread; } return 0; err_sched_add_thread: timer_remove_alarm(&g_alarm); err_timer_add_alarm: for (size_t i = ARRAY_NELEMS(g_current_thread); i;) { --i; g_current_thread[i] = NULL; } return res; }
/* Return the physical-memory area containing the page frame pgindex,
 * or NULL if no area in the table covers it. */
const struct pmem_area *
pmem_area_get_by_frame(os_index_t pgindex)
{
	const struct pmem_area *area;
	const struct pmem_area *area_end =
		g_pmem_area + ARRAY_NELEMS(g_pmem_area);

	for (area = g_pmem_area; area < area_end; ++area) {
		if (pmem_area_contains_frame(area, pgindex)) {
			return area;
		}
	}
	return NULL;
}
/** * \brief search for a thread with the task and thread id * \param taskid a task id * \param tcbid a thread id * \return the found thread, or NULL otherwise */ struct tcb* sched_search_thread(unsigned int taskid, unsigned char tcbid) { const struct list* cur_prio = g_thread; const struct list* end_prio = cur_prio + ARRAY_NELEMS(g_thread); while (cur_prio < end_prio) { struct list* listhead = list_begin(cur_prio); while (listhead != list_end(cur_prio)) { struct tcb* tcb = tcb_of_sched_list(listhead); if ((tcb->id == tcbid) && (tcb->task->id == taskid)) { return tcb; } listhead = list_next(listhead); } ++cur_prio; } return NULL; }