/* Map an allocation size to its size-bucket index: floor(log2(size)),
 * i.e. the bit position of the most significant set bit. Bucket 0 covers
 * size 1, bucket 1 covers sizes 2-3, bucket 2 covers 4-7, and so on.
 *
 * NOTE(review): size == 0 yields 31 - 32 = -1, which truncates to 255 in
 * a U8 -- callers must never pass 0. Confirm all call sites guarantee
 * a non-zero size.
 */
__inline static U8 bucketIndex(U16 size)
{
	return 31 - __clz(size);
}
/* Service-call handler: wait for an event on an event set.
 *
 * arg[0] (in):  event set wait-queue id (THINKOS_EVENT_BASE + set number).
 * arg[0] (out): the consumed event number on immediate success, or
 *               THINKOS_EINVAL on a bad argument. If no event is pending
 *               the calling thread is blocked and the return value is
 *               delivered later by the wakeup path.
 */
void thinkos_ev_wait_svc(int32_t * arg)
{
	unsigned int wq = arg[0];
	unsigned int no = wq - THINKOS_EVENT_BASE;
	int self = thinkos_rt.active;
	unsigned int ev;

#if THINKOS_ENABLE_ARG_CHECK
	/* reject ids outside the event-set object range */
	if (no >= THINKOS_EVENT_MAX) {
		DCC_LOG1(LOG_ERROR, "object %d is not an event set!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_EVENT_ALLOC
	/* reject event sets that were never allocated */
	if (__bit_mem_rd(&thinkos_rt.ev_alloc, no) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid event set %d!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

	/* mask interrupts: pend/mask bitmaps and the wait queues are shared
	   with interrupt-context signalers */
	cm3_cpsid_i();

	/* check for any pending unmasked event.
	   __clz(__rbit(x)) yields the index of the lowest set bit of x,
	   or 32 when x == 0 (no pending unmasked event). */
	if ((ev = __clz(__rbit(thinkos_rt.ev[no].pend &
			thinkos_rt.ev[no].mask))) < 32) {
		DCC_LOG2(LOG_MSG, "set=0x%08x msk=0x%08x",
			thinkos_rt.ev[no].pend, thinkos_rt.ev[no].mask);
		/* consume the event: clear its pending flag */
		__bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 0);
		DCC_LOG2(LOG_INFO, "pending event %d.%d!", wq, ev);
		arg[0] = ev;
		cm3_cpsie_i();
		return;
	}

	/* insert into the wait queue */
	__thinkos_wq_insert(wq, self);

	/* wait for event */
	/* remove from the ready wait queue */
	__bit_mem_wr(&thinkos_rt.wq_ready, thinkos_rt.active, 0);

#if THINKOS_ENABLE_TIMESHARE
	/* if the ready queue is empty, collect
	   the threads from the CPU wait queue */
	if (thinkos_rt.wq_ready == 0) {
		thinkos_rt.wq_ready = thinkos_rt.wq_tmshare;
		thinkos_rt.wq_tmshare = 0;
	}
#endif

	cm3_cpsie_i();

	DCC_LOG2(LOG_INFO, "<%d> waiting for event %d.xx ...", self, wq);

	/* signal the scheduler ... */
	__thinkos_defer_sched();
}
/*
 * Initialize IO pins and SPI low level device for the Lattice iCE40
 * FPGA configuration interface (SPI3, slave-select bit-banged).
 *
 * freq: requested SPI clock frequency in Hz.
 * Returns 0 (always succeeds).
 */
static int lattice_ice40_io_init(unsigned int freq)
{
	struct stm32f_spi * spi = STM32F_SPI3;
	unsigned int div;
	int br;

	/* Enable peripheral clock */
	stm32_clk_enable(STM32_RCC, STM32_CLK_GPIOB);
	stm32_clk_enable(STM32_RCC, STM32_CLK_GPIOC);
	stm32_clk_enable(STM32_RCC, STM32_CLK_GPIOE);
	stm32_clk_enable(STM32_RCC, STM32_CLK_SPI3);

	/* Configure IO pins */
	/* CDONE is an input driven by the FPGA (config-done status) */
	stm32_gpio_mode(ICE40_CDONE, INPUT, PULL_UP);

	/* drive CRESET and SPI_SS high (inactive) before switching the
	   pins to output, so no glitch resets or selects the FPGA */
	stm32_gpio_set(ICE40_CRESET);
	stm32_gpio_mode(ICE40_CRESET, OUTPUT, SPEED_MED);

	stm32_gpio_set(ICE40_SPI_SS);
	stm32_gpio_mode(ICE40_SPI_SS, OUTPUT, SPEED_MED);

	stm32_gpio_mode(ICE40_SPI_SCK, ALT_FUNC, PUSH_PULL | SPEED_LOW);
	stm32_gpio_af(ICE40_SPI_SCK, GPIO_AF6);

	stm32_gpio_mode(ICE40_SPI_SDO, ALT_FUNC, PULL_UP);
	stm32_gpio_af(ICE40_SPI_SDO, GPIO_AF6);

	stm32_gpio_mode(ICE40_SPI_SDI, ALT_FUNC, PUSH_PULL | SPEED_LOW);
	stm32_gpio_af(ICE40_SPI_SDI, GPIO_AF6);

	/* Configure SPI */
	/* The SPI baud rate is pclk / 2^(br + 1); compute br = ceil(log2(div))
	   so the resulting clock never exceeds the requested freq. */
	div = stm32_clk_hz(STM32_CLK_SPI3) / freq / 2;
	br = 31 - __clz(div);
	if (div > (1 << br))
		br++;
	/* NOTE(review): if freq > pclk / 2 then div == 0, __clz(0) == 32 and
	   br becomes -1, making (1 << br) undefined -- confirm callers never
	   request such a high frequency. */

	DCC_LOG3(LOG_TRACE, "SPI freq=%d div=%d br=%d", freq, div, br);

	/* reset control/I2S registers to a known state before enabling */
	spi->cr1 = 0;
	spi->cr2 = 0;
	spi->i2scfgr = 0;
	spi->i2spr = 0;

	/* Master mode, MSB first */
	spi->cr1 = SPI_SPE | SPI_BR_SET(br) | SPI_MSTR | SPI_SSM | SPI_SSI;

	return 0;
}
// ARM-LABEL: test_clz // ARM: call i32 @llvm.ctlz.i32(i32 %t, i1 false) uint32_t test_clz(uint32_t t) { return __clz(t); }
/* Find-last-set: return the 0-based index of the most significant set
 * bit of 'word', or -1 when no bit is set. */
static int ETLSF_fls(uint32_t word)
{
	if (word == 0)
		return -1;

	return 31 - __clz(word);
}
/* Find-first-set: return the 0-based index of the least significant set
 * bit of 'word', or -1 when no bit is set. */
static int ETLSF_ffs(uint32_t word)
{
	/* two's-complement trick: word & -word isolates the lowest set bit
	   (0 stays 0, driving __clz to 32 and the result to -1) */
	const uint32_t lsb = word & -word;

	return 31 - __clz(lsb);
}
/* Initialize an STM32 SPI peripheral in master mode.
 *
 * spi:    peripheral register block (must be one known to stm32f_spi_lookup).
 * spi_io: miso/mosi/sck pin assignments.
 * freq:   requested SPI clock frequency in Hz.
 * opt:    extra SPI_CR1 option bits OR'ed into the control register.
 *
 * Returns the peripheral id on success, or the negative lookup error
 * for an unknown 'spi' pointer.
 */
int stm32f_spi_init(struct stm32f_spi * spi,
					const struct stm32f_spi_io * spi_io,
					unsigned int freq, unsigned int opt)
{
	struct stm32_rcc * rcc = STM32_RCC;
	gpio_io_t io;
	uint32_t div;
	int br;
	int id;

	if ((id = stm32f_spi_lookup(spi)) < 0) {
		/* invalid SPI ??? */
		return id;
	}

	/* Configure IO pins */
	io = spi_io->miso;
	stm32_gpio_clock_en(STM32_GPIO(io.port));
	stm32_gpio_mode(STM32_GPIO(io.port), io.pin, ALT_FUNC,
					PULL_UP | SPEED_MED);
	stm32_gpio_af(STM32_GPIO(io.port), io.pin, spi_cfg[id].af);

	io = spi_io->mosi;
	stm32_gpio_clock_en(STM32_GPIO(io.port));
	stm32_gpio_mode(STM32_GPIO(io.port), io.pin, ALT_FUNC,
					PUSH_PULL | SPEED_MED);
	stm32_gpio_af(STM32_GPIO(io.port), io.pin, spi_cfg[id].af);

	io = spi_io->sck;
	stm32_gpio_clock_en(STM32_GPIO(io.port));
	stm32_gpio_mode(STM32_GPIO(io.port), io.pin, ALT_FUNC,
					PUSH_PULL | SPEED_MED);
	stm32_gpio_af(STM32_GPIO(io.port), io.pin, spi_cfg[id].af);

	/* Enable peripheral clock and pick the bus clock feeding it
	   (SPI peripherals sit on either APB1 or APB2) */
	if (spi_cfg[id].apb2) {
		rcc->apb2enr |= (1 << spi_cfg[id].ckbit);
		div = stm32f_apb2_hz / freq / 2;
	} else {
		rcc->apb1enr |= (1 << spi_cfg[id].ckbit);
		div = stm32f_apb1_hz / freq / 2;
	}

	/* SPI baud rate is pclk / 2^(br + 1); br = ceil(log2(div)) so the
	   resulting clock does not exceed the requested freq. */
	br = 31 - __clz(div);
	if (div > (1 << br)) {
		br++;
	}
	/* NOTE(review): if freq > pclk / 2 then div == 0, __clz(0) == 32 and
	   br becomes -1, making (1 << br) undefined -- verify callers. */

	DCC_LOG3(LOG_TRACE, "SPI id=%d div=%d br=%d", id, div, br);

	/* reset control/I2S registers to a known state before enabling */
	spi->cr1 = 0;
	spi->cr2 = 0;
	spi->i2scfgr = 0;
	spi->i2spr = 0;

	spi->cr1 = SPI_SPE | SPI_BR_SET(br) | opt | SPI_SSM | SPI_SSI;

#if 0
	spi->cr1 = SPI_SPE | SPI_MSTR | SPI_SSM | SPI_SSI | \
			   SPI_BR_SET(br) | SPI_LSBFIRST;
#endif

	return id;
}
/* Service-call handler: unmask a set of events on an event set,
 * delivering any events that were pending-but-masked to threads
 * currently waiting on the set.
 *
 * arg[0]: event set wait-queue id.
 * arg[1]: bitmap of event numbers to unmask.
 * On a bad argument arg[0] is set to THINKOS_EINVAL.
 */
void thinkos_ev_unmask_svc(int32_t * arg)
{
	unsigned int wq = arg[0];
	uint32_t mask = arg[1];
	unsigned int no = wq - THINKOS_EVENT_BASE;
	unsigned int ev;
	int th;

#if THINKOS_ENABLE_ARG_CHECK
	/* reject ids outside the event-set object range */
	if (no >= THINKOS_EVENT_MAX) {
		DCC_LOG1(LOG_ERROR, "object %d is not an event set!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_EVENT_ALLOC
	/* reject event sets that were never allocated */
	if (__bit_mem_rd(&thinkos_rt.ev_alloc, no) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid event set %d!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

	/* mask interrupts while manipulating the shared bitmaps/queues */
	cm3_cpsid_i();

	/* unmask the events on the mask bitmap */
	thinkos_rt.ev[no].mask |= mask;

	/* wake up the first unmasked thread if any.
	   __clz(__rbit(x)) is the index of the lowest set bit, 32 if none. */
	if ((ev = __clz(__rbit(thinkos_rt.ev[no].pend & mask))) < 32) {
		if ((th = __thinkos_wq_head(wq)) != THINKOS_THREAD_NULL) {
			/* a pending event was unmasked and there is a thread waiting
			   on the queue: clear the event pending flag and wake up the
			   thread. */
			__bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 0);
			/* wakeup from the event wait queue, set the return of
			   the thread to the event */
			__thinkos_wakeup_return(wq, th, ev);
			DCC_LOG3(LOG_TRACE, "<%d> waked up with event %d.%d", th, wq, ev);
			/* signal the scheduler ...
			   (the scheduler only runs after cm3_cpsie_i() below, so any
			   further wakeups in the loop happen first) */
			__thinkos_defer_sched();
		} else {
			/* no threads waiting: pending events stay set and will be
			   consumed by the next waiter */
			cm3_cpsie_i();
			return;
		}
	}

	/* wake up as many other threads as possible.
	   NOTE(review): this loop repeats the block above almost verbatim
	   (only __thinkos_defer_sched differs); the two could likely be
	   folded into one loop that defers the scheduler once -- confirm
	   before refactoring. */
	while ((ev = __clz(__rbit(thinkos_rt.ev[no].pend & mask))) < 32) {
		if ((th = __thinkos_wq_head(wq)) != THINKOS_THREAD_NULL) {
			/* a pending event was unmasked and there is a thread waiting
			   on the queue: clear the event pending flag and wake up the
			   thread. */
			__bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 0);
			/* wakeup from the event wait queue, set the return of
			   the thread to the event */
			__thinkos_wakeup_return(wq, th, ev);
			DCC_LOG3(LOG_TRACE, "<%d> waked up with event %d.%d", th, wq, ev);
		} else {
			/* no more threads waiting */
			break;
		}
	}

	cm3_cpsie_i();
}
/* Service-call handler: wait for an event on an event set with a timeout.
 *
 * arg[0] (in):  event set wait-queue id.
 * arg[1] (in):  timeout in milliseconds.
 * arg[0] (out): the consumed event number on immediate success,
 *               THINKOS_EINVAL on a bad argument, or THINKOS_ETIMEDOUT
 *               if the timeout expires before an event arrives (the
 *               wakeup path overwrites it with the event on delivery).
 */
void thinkos_ev_timedwait_svc(int32_t * arg)
{
	unsigned int wq = arg[0];
	uint32_t ms = (uint32_t)arg[1];
	unsigned int no = wq - THINKOS_EVENT_BASE;
	int self = thinkos_rt.active;
	unsigned int ev;

#if THINKOS_ENABLE_ARG_CHECK
	/* reject ids outside the event-set object range */
	if (no >= THINKOS_EVENT_MAX) {
		DCC_LOG1(LOG_ERROR, "object %d is not an event set!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#if THINKOS_ENABLE_EVENT_ALLOC
	/* reject event sets that were never allocated */
	if (__bit_mem_rd(&thinkos_rt.ev_alloc, no) == 0) {
		DCC_LOG1(LOG_ERROR, "invalid event set %d!", wq);
		arg[0] = THINKOS_EINVAL;
		return;
	}
#endif
#endif

	/* mask interrupts while manipulating the shared bitmaps/queues */
	cm3_cpsid_i();

	/* check for any pending unmasked event.
	   __clz(__rbit(x)) is the index of the lowest set bit, 32 if none. */
	if ((ev = __clz(__rbit(thinkos_rt.ev[no].pend &
			thinkos_rt.ev[no].mask))) < 32) {
		/* consume the event: clear its pending flag */
		__bit_mem_wr(&thinkos_rt.ev[no].pend, ev, 0);
		arg[0] = ev;
		cm3_cpsie_i();
		return;
	}

	/* insert into the mutex wait queue */
	__thinkos_tmdwq_insert(wq, self, ms);

	/* wait for event */
	/* remove from the ready wait queue */
	__bit_mem_wr(&thinkos_rt.wq_ready, thinkos_rt.active, 0);

#if THINKOS_ENABLE_TIMESHARE
	/* if the ready queue is empty, collect
	   the threads from the CPU wait queue */
	if (thinkos_rt.wq_ready == 0) {
		thinkos_rt.wq_ready = thinkos_rt.wq_tmshare;
		thinkos_rt.wq_tmshare = 0;
	}
#endif

	/* Set the default return value to timeout. The
	   ev_rise() call will change it to the active event */
	arg[0] = THINKOS_ETIMEDOUT;

	cm3_cpsie_i();

	DCC_LOG2(LOG_INFO, "<%d> waiting for event %d...", self, wq);

	/* signal the scheduler ... */
	__thinkos_defer_sched();
}
/* Count leading ones: the number of consecutive 1 bits starting from the
 * most significant bit of 'rt', computed as the leading-zero count of the
 * bitwise complement. */
static __inline u32 __clo(u32 rt)
{
	u32 inverted = ~rt;

	return __clz(inverted);
}
/* Convert a page size to its bit width: floor(log2(n)), i.e. the position
 * of the most significant set bit. n must be non-zero (asserted); callers
 * presumably pass a power of two -- TODO confirm.
 * NOTE(review): the 31-based math assumes okl4_word_t is 32 bits wide. */
static okl4_word_t pagesize_to_bits(okl4_word_t n)
{
    okl4_word_t msb;

    assert(n != 0);
    msb = 31 - __clz(n);
    return msb;
}