/* Prepare every nanokernel object this test uses, then report readiness. */
void initNanoObjects(void)
{
	nano_lifo_init(&test_lifo);          /* LIFO under test */
	nano_sem_init(&taskWaitSem);         /* task-side wait semaphore */
	nano_sem_init(&fiberWaitSem);        /* fiber-side wait semaphore */
	nano_timer_init(&timer, timerData);  /* timer used for delays */
	nano_lifo_init(&multi_waiters);      /* LIFO for the multi-waiter case */
	nano_sem_init(&reply_multi_waiters); /* replies from multi-waiters */

	TC_PRINT("Nano objects initialized\n");
}
/* Initialize the FIFOs, semaphores and timer exercised by this test. */
void initNanoObjects(void)
{
	/* FIFOs under test */
	nano_fifo_init(&nanoFifoObj);
	nano_fifo_init(&nanoFifoObj2);

	/* synchronization semaphores */
	nano_sem_init(&nanoSemObj1);
	nano_sem_init(&nanoSemObj2);
	nano_sem_init(&nanoSemObj3);
	nano_sem_init(&nanoSemObjTask);

	/* timer used for delays */
	nano_timer_init(&timer, timerData);
} /* initNanoObjects */
/* Reset a channel's TX context and (re)initialize its credits semaphore. */
static void l2cap_chan_tx_init(struct bt_l2cap_le_chan *chan)
{
	BT_DBG("chan %p", chan);

	/* wipe the whole TX state before credits are handed out */
	memset(&chan->tx, 0, sizeof(chan->tx));
	nano_sem_init(&chan->tx.credits);
}
void initNanoObjects(void) { struct isrInitInfo i = { {isr_sem_give, isr_sem_take}, {&isrSemInfo, &isrSemInfo}, }; (void)initIRQ(&i); nano_sem_init(&testSem); nano_sem_init(&multi_waiters); nano_sem_init(&reply_multi_waiters); nano_timer_init(&timer, timerData); TC_PRINT("Nano objects initialized\n"); }
int adc_qmsi_init(struct device *dev) { qm_adc_config_t cfg; struct adc_info *info = dev->driver_data; dev->driver_api = &api_funcs; /* Enable the ADC and set the clock divisor */ clk_periph_enable(CLK_PERIPH_CLK | CLK_PERIPH_ADC | CLK_PERIPH_ADC_REGISTER); /* ADC clock divider*/ clk_adc_set_div(CONFIG_ADC_QMSI_CLOCK_RATIO); /* Set up config */ /* Clock cycles between the start of each sample */ cfg.window = CONFIG_ADC_QMSI_SERIAL_DELAY; cfg.resolution = CONFIG_ADC_QMSI_SAMPLE_WIDTH; qm_adc_set_config(QM_ADC_0, &cfg); device_sync_call_init(&info->sync); nano_sem_init(&info->sem); nano_sem_give(&info->sem); info->state = ADC_STATE_IDLE; adc_config_irq(); return 0; }
/* Set up the stacks, semaphore and timer this test relies on. */
void initNanoObjects(void)
{
	/* nanokernel stacks under test */
	nano_stack_init(&nanoStackObj, stack1);
	nano_stack_init(&nanoStackObj2, stack2);

	nano_sem_init(&nanoSemObj);
	nano_timer_init(&timer, timerData);
} /* initNanoObjects */
int bmg160_init(struct device *dev) { struct bmg160_device_config *cfg = dev->config->config_info; struct bmg160_device_data *bmg160 = dev->driver_data; uint8_t chip_id = 0; uint16_t range_dps; bmg160->i2c = device_get_binding((char *)cfg->i2c_port); if (!bmg160->i2c) { SYS_LOG_DBG("I2C master controller not found!"); return -EINVAL; } nano_sem_init(&bmg160->sem); nano_sem_give(&bmg160->sem); if (bmg160_read_byte(dev, BMG160_REG_CHIPID, &chip_id) < 0) { SYS_LOG_DBG("Failed to read chip id."); return -EIO; } if (chip_id != BMG160_CHIP_ID) { SYS_LOG_DBG("Unsupported chip detected (0x%x)!", chip_id); return -ENODEV; } /* reset the chip */ bmg160_write_byte(dev, BMG160_REG_BGW_SOFTRESET, BMG160_RESET); sys_thread_busy_wait(1000); /* wait for the chip to come up */ if (bmg160_write_byte(dev, BMG160_REG_RANGE, BMG160_DEFAULT_RANGE) < 0) { SYS_LOG_DBG("Failed to set range."); return -EIO; } range_dps = bmg160_gyro_range_map[BMG160_DEFAULT_RANGE]; bmg160->scale = BMG160_RANGE_TO_SCALE(range_dps); if (bmg160_write_byte(dev, BMG160_REG_BW, BMG160_DEFAULT_ODR) < 0) { SYS_LOG_DBG("Failed to set sampling frequency."); return -EIO; } /* disable interrupts */ if (bmg160_write_byte(dev, BMG160_REG_INT_EN0, 0) < 0) { SYS_LOG_DBG("Failed to disable all interrupts."); return -EIO; } #ifdef CONFIG_BMG160_TRIGGER bmg160_trigger_init(dev); #endif dev->driver_api = &bmg160_api; return 0; }
/** * @brief Manokernel entry point. * * @details Start the kernel event data colector fiber. Then * do wait forever. * @return No return value. */ int main(void) { int i; #ifdef CONFIG_MICROKERNEL tmon_index = 0; #endif kernel_event_logger_fiber_start(); /* initialize philosopher semaphores */ for (i = 0; i < N_PHILOSOPHERS; i++) { nano_sem_init(&forks[i]); nano_task_sem_give(&forks[i]); } /* create philosopher fibers */ for (i = 0; i < N_PHILOSOPHERS; i++) { task_fiber_start(&philStack[i][0], STSIZE, (nano_fiber_entry_t) philEntry, 0, 0, 6, 0); } task_fiber_start(&philStack[N_PHILOSOPHERS][0], STSIZE, (nano_fiber_entry_t) fork_manager_entry, 0, 0, 6, 0); /* wait forever */ while (1) { extern void nano_cpu_idle(void); nano_cpu_idle(); } }
void RegressionTaskEntry(void) { int tcRC; nano_sem_init(&test_nano_timers_sem); PRINT_DATA("Starting timer tests\n"); PRINT_LINE; task_fiber_start(test_nano_timers_stack, 512, test_nano_timers, 0, 0, 5, 0); /* Test the task_timer_alloc() API */ TC_PRINT("Test the allocation of timers\n"); tcRC = testLowTimerGet(); if (tcRC != TC_PASS) { goto exitRtn; } TC_PRINT("Test the one shot feature of a timer\n"); tcRC = testLowTimerOneShot(); if (tcRC != TC_PASS) { goto exitRtn; } TC_PRINT("Test that a timer does not start\n"); tcRC = testLowTimerDoesNotStart(); if (tcRC != TC_PASS) { goto exitRtn; } TC_PRINT("Test the periodic feature of a timer\n"); tcRC = testLowTimerPeriodicity(); if (tcRC != TC_PASS) { goto exitRtn; } TC_PRINT("Test the stopping of a timer\n"); tcRC = testLowTimerStop(); if (tcRC != TC_PASS) { goto exitRtn; } TC_PRINT("Verifying the nanokernel timer fired\n"); if (!nano_task_sem_take(&test_nano_timers_sem)) { tcRC = TC_FAIL; goto exitRtn; } TC_PRINT("Verifying the nanokernel timeouts worked\n"); tcRC = task_sem_take_wait_timeout(test_nano_timeouts_sem, SECONDS(5)); tcRC = tcRC == RC_OK ? TC_PASS : TC_FAIL; exitRtn: TC_END_RESULT(tcRC); TC_END_REPORT(tcRC); }
/**
 * Initialize the resources used by the framework's sync services.
 *
 * IMPORTANT: must be called during OS abstraction layer initialization,
 * and only once after reset — calling it again may make the queue
 * services fail.
 */
void framework_init_queue(void)
{
#if defined(CONFIG_NANOKERNEL) && !defined(ZEPHYR_MICRO_OS_ABSTRACTION_USE_SINGLE_POOL_LOCK)
	/* the pool lock starts out available */
	nano_sem_init(&QueuePoolLockSem);
	nano_task_sem_give(&QueuePoolLockSem);
#endif
	; /* Nothing to do */
}
void initNanoObjects(void) { struct isrInitInfo i = { {isr_fifo_put, isr_fifo_get}, {&isrFifoInfo, &isrFifoInfo}, }; (void)initIRQ(&i); nano_fifo_init(&nanoFifoObj); nano_fifo_init(&nanoFifoObj2); nano_sem_init(&nanoSemObj1); nano_sem_init(&nanoSemObj2); nano_sem_init(&nanoSemObj3); nano_sem_init(&nanoSemObjTask); nano_timer_init(&timer, timerData); } /* initNanoObjects */
/* Initialize a channel's RX context, keeping a caller-provided MTU if set. */
static void l2cap_chan_rx_init(struct bt_l2cap_le_chan *chan)
{
	BT_DBG("chan %p", chan);

	/* Use existing MTU if defined */
	if (!chan->rx.mtu) {
		chan->rx.mtu = BT_L2CAP_MAX_LE_MTU;
	}

	chan->rx.mps = BT_L2CAP_MAX_LE_MPS;
	nano_sem_init(&chan->rx.credits);
}
/* Arm the ACK bookkeeping when the outgoing frame requests an ACK.
 * Returns true when an ACK is expected, false otherwise.
 */
static inline bool prepare_for_ack(struct net_buf *buf)
{
	if (packetbuf_attr(buf, PACKETBUF_ATTR_MAC_ACK) == 0) {
		return false;
	}

	PRINTF("simplerdc: ACK requested\n");

	nano_sem_init(&ack_lock);
	ack_received = false;

	return true;
}
int initNanoObjects(void) { nano_sem_init(&wakeFiber); nano_timer_init(&timer, timerData); nano_fifo_init(&timeout_order_fifo); /* no nanoCpuExcConnect on Cortex-M3/M4 */ #if !defined(CONFIG_CPU_CORTEX_M3_M4) nanoCpuExcConnect(IV_DIVIDE_ERROR, exc_divide_error_handler); #endif return TC_PASS; }
/* Clear the context table, init each RX queue and release the table lock. */
void net_context_init(void)
{
	int slot;

	nano_sem_init(&contexts_lock);

	memset(contexts, 0, sizeof(contexts));

	for (slot = 0; slot < NET_MAX_CONTEXT; slot++) {
		nano_fifo_init(&contexts[slot].rx_queue);
	}

	context_sem_give(&contexts_lock);
}
/* Wait briefly for an ACK; broadcasts and frames that need no ACK
 * succeed immediately.
 */
static inline uint8_t wait_for_ack(bool broadcast, bool ack_required)
{
	if (broadcast || !ack_required) {
		return MAC_TX_OK;
	}

	/* on timeout, re-arm the lock for the next transmission */
	if (nano_sem_take(&ack_lock, MSEC(10)) == 0) {
		nano_sem_init(&ack_lock);
	}

	return ack_received ? MAC_TX_OK : MAC_TX_NOACK;
}
/** * * @brief The test main function * * @return 0 on success */ int nanoIntToFiberSem(void) { PRINT_FORMAT(" 3- Measure time from ISR to executing a different fiber" " (rescheduled)"); nano_sem_init(&testSema); TICK_SYNCH(); task_fiber_start(&waiterStack[0], STACKSIZE, (nano_fiber_entry_t) fiberWaiter, 0, 0, 5, 0); task_fiber_start(&intStack[0], STACKSIZE, (nano_fiber_entry_t) fiberInt, 0, 0, 6, 0); PRINT_FORMAT(" switching time is %lu tcs = %lu nsec", timestamp, SYS_CLOCK_HW_CYCLES_TO_NS(timestamp)); return 0; }
int tmp007_init_interrupt(struct device *dev) { struct tmp007_data *drv_data = dev->driver_data; int rc; rc = tmp007_reg_update(drv_data, TMP007_REG_CONFIG, TMP007_ALERT_EN_BIT, TMP007_ALERT_EN_BIT); if (rc != 0) { SYS_LOG_DBG("Failed to enable interrupt pin!"); return -EIO; } /* setup gpio interrupt */ drv_data->gpio = device_get_binding(CONFIG_TMP007_GPIO_DEV_NAME); if (drv_data->gpio == NULL) { SYS_LOG_DBG("Failed to get pointer to %s device!", CONFIG_TMP007_GPIO_DEV_NAME); return -EINVAL; } gpio_pin_configure(drv_data->gpio, CONFIG_TMP007_GPIO_PIN_NUM, GPIO_DIR_IN | GPIO_INT | GPIO_INT_LEVEL | GPIO_INT_ACTIVE_HIGH | GPIO_INT_DEBOUNCE); gpio_init_callback(&drv_data->gpio_cb, tmp007_gpio_callback, BIT(CONFIG_TMP007_GPIO_PIN_NUM)); rc = gpio_add_callback(drv_data->gpio, &drv_data->gpio_cb); if (rc != 0) { SYS_LOG_DBG("Failed to set gpio callback!"); return -EIO; } #if defined(CONFIG_TMP007_TRIGGER_OWN_FIBER) nano_sem_init(&drv_data->gpio_sem); fiber_start(drv_data->fiber_stack, CONFIG_TMP007_FIBER_STACK_SIZE, (nano_fiber_entry_t)tmp007_fiber, POINTER_TO_INT(dev), 0, CONFIG_TMP007_FIBER_PRIORITY, 0); #elif defined(CONFIG_TMP007_TRIGGER_GLOBAL_FIBER) drv_data->work.handler = tmp007_fiber_cb; drv_data->work.arg = dev; #endif return 0; }
/* Set up the mainloop lock, the event FIFOs, and seed the free-event pool. */
int sol_mainloop_impl_platform_init(void)
{
	int idx;

	main_thread_id = sys_thread_self_get();

	/* mainloop lock starts out available */
	nano_sem_init(&_sol_mainloop_lock);
	nano_sem_give(&_sol_mainloop_lock);

	nano_fifo_init(&_sol_mainloop_pending_events);
	nano_fifo_init(&_sol_mainloop_free_events);

	/* every event entry starts on the free list */
	for (idx = 0; idx < sol_util_array_size(_events); idx++) {
		struct me_fifo_entry *entry = &_events[idx];

		nano_fifo_put(&_sol_mainloop_free_events, entry);
	}

	return 0;
}
void fiberEntry(void) { struct nano_timer timer; uint32_t data[2] = {0, 0}; nano_sem_init(&nanoSemFiber); nano_timer_init(&timer, data); while (1) { /* wait for task to let us have a turn */ nano_fiber_sem_take_wait(&nanoSemFiber); /* say "hello" */ PRINT("%s: Hello World!\n", __FUNCTION__); /* wait a while, then let task have a turn */ nano_fiber_timer_start(&timer, SLEEPTICKS); nano_fiber_timer_wait(&timer); nano_fiber_sem_give(&nanoSemTask); } }
/** * Initialize the resources used by the framework's timer services * * IMPORTANT : this function must be called during the initialization * of the OS abstraction layer. * this function shall only be called once after reset, otherwise * it may cause the take/lock and give/unlock services to fail */ void framework_init_timer(void) { uint8_t idx; #ifdef CONFIG_NANOKERNEL nano_sem_init ( &g_TimerSem ); nano_timer_init (&g_NanoTimer, &g_NanoTimerData); #else g_TimerSem = OS_TIMER_SEM; #endif /* start with empty list of active timers: */ g_CurrentTimerHead = NULL; /* memset ( g_TimerPool_elements, 0 ): */ for (idx = 0; idx < TIMER_POOL_SIZE; idx++) { g_TimerPool_elements[idx].desc.callback = NULL; g_TimerPool_elements[idx].desc.data = NULL; g_TimerPool_elements[idx].desc.delay = 0; g_TimerPool_elements[idx].desc.expiration = 0; g_TimerPool_elements[idx].desc.repeat = false; g_TimerPool_elements[idx].prev = NULL; g_TimerPool_elements[idx].next = NULL; /* hopefully, the init function is performed before * timer_create and timer_stop can be called, * hence there is no need for a critical section * here */ } /* start "timer_task" in a new fiber for NanoK, or a new task for microK */ #ifdef CONFIG_NANOKERNEL fiber_fiber_start ((char *) g_TimerFiberStack, TIMER_CBK_TASK_STACK_SIZE, timer_task,0,0, TIMER_CBK_TASK_PRIORITY, TIMER_CBK_TASK_OPTIONS); #else task_start(OS_TASK_TIMER); #endif }
/* Nanokernel philosophers demo entry point. */
int main(void)
{
	int phil;

	PRINTF(DEMO_DESCRIPTION, "fibers", "nanokernel");

	/* each fork is a semaphore that starts out available */
	for (phil = 0; phil < N_PHILOSOPHERS; phil++) {
		nano_sem_init(&forks[phil]);
		nano_task_sem_give(&forks[phil]);
	}

	/* create philosopher fibers */
	for (phil = 0; phil < N_PHILOSOPHERS; phil++) {
		task_fiber_start(&philStack[phil][0], STSIZE,
				 (nano_fiber_entry_t) philEntry,
				 0, 0, 6, 0);
	}

	/* wait forever */
	while (1) {
		extern void nano_cpu_idle(void);
		nano_cpu_idle();
	}
}
void main(void) { struct nano_timer timer; uint32_t data[2] = {0, 0}; task_fiber_start(&fiberStack[0], STACKSIZE, (nano_fiber_entry_t) fiberEntry, 0, 0, 7, 0); nano_sem_init(&nanoSemTask); nano_timer_init(&timer, data); while (1) { /* say "hello" */ PRINT("%s: Hello Screen!\n", __FUNCTION__); /* wait a while, then let fiber have a turn */ nano_task_timer_start(&timer, SLEEPTICKS); nano_task_timer_wait(&timer); nano_task_sem_give(&nanoSemFiber); /* now wait for fiber to let us have a turn */ nano_task_sem_take_wait(&nanoSemTask); } }
/* Initialize the fiber's semaphore and launch the test fiber. */
void testFiberInit(void)
{
	nano_sem_init(&fiberSem);

	task_fiber_start(fiberStack, FIBER_STACK_SIZE,
			 (nano_fiber_entry_t)testFiberEntry, 0, 0,
			 FIBER_PRIORITY, 0);
}
int bma280_init_interrupt(struct device *dev) { struct bma280_data *drv_data = dev->driver_data; int rc; /* set latched interrupts */ rc = i2c_reg_write_byte(drv_data->i2c, BMA280_I2C_ADDRESS, BMA280_REG_INT_RST_LATCH, BMA280_BIT_INT_LATCH_RESET | BMA280_INT_MODE_LATCH); if (rc != 0) { SYS_LOG_DBG("Could not set latched interrupts"); return -EIO; } /* setup data ready gpio interrupt */ drv_data->gpio = device_get_binding(CONFIG_BMA280_GPIO_DEV_NAME); if (drv_data->gpio == NULL) { SYS_LOG_DBG("Cannot get pointer to %s device", CONFIG_BMA280_GPIO_DEV_NAME); return -EINVAL; } gpio_pin_configure(drv_data->gpio, CONFIG_BMA280_GPIO_PIN_NUM, GPIO_DIR_IN | GPIO_INT | GPIO_INT_LEVEL | GPIO_INT_ACTIVE_HIGH | GPIO_INT_DEBOUNCE); gpio_init_callback(&drv_data->gpio_cb, bma280_gpio_callback, BIT(CONFIG_BMA280_GPIO_PIN_NUM)); rc = gpio_add_callback(drv_data->gpio, &drv_data->gpio_cb); if (rc != 0) { SYS_LOG_DBG("Could not set gpio callback"); return -EIO; } /* map data ready interrupt to INT1 */ rc = i2c_reg_update_byte(drv_data->i2c, BMA280_I2C_ADDRESS, BMA280_REG_INT_MAP_1, BMA280_INT_MAP_1_BIT_DATA, BMA280_INT_MAP_1_BIT_DATA); if (rc != 0) { SYS_LOG_DBG("Could not map data ready interrupt pin"); return -EIO; } /* map any-motion interrupt to INT1 */ rc = i2c_reg_update_byte(drv_data->i2c, BMA280_I2C_ADDRESS, BMA280_REG_INT_MAP_0, BMA280_INT_MAP_0_BIT_SLOPE, BMA280_INT_MAP_0_BIT_SLOPE); if (rc != 0) { SYS_LOG_DBG("Could not map any-motion interrupt pin"); return -EIO; } /* disable data ready interrupt */ rc = i2c_reg_update_byte(drv_data->i2c, BMA280_I2C_ADDRESS, BMA280_REG_INT_EN_1, BMA280_BIT_DATA_EN, 0); if (rc != 0) { SYS_LOG_DBG("Could not disable data ready interrupt"); return -EIO; } /* disable any-motion interrupt */ rc = i2c_reg_update_byte(drv_data->i2c, BMA280_I2C_ADDRESS, BMA280_REG_INT_EN_0, BMA280_SLOPE_EN_XYZ, 0); if (rc != 0) { SYS_LOG_DBG("Could not disable data ready interrupt"); return -EIO; } #if defined(CONFIG_BMA280_TRIGGER_OWN_FIBER) 
nano_sem_init(&drv_data->gpio_sem); fiber_start(drv_data->fiber_stack, CONFIG_BMA280_FIBER_STACK_SIZE, (nano_fiber_entry_t)bma280_fiber, POINTER_TO_INT(dev), 0, CONFIG_BMA280_FIBER_PRIORITY, 0); #elif defined(CONFIG_BMA280_TRIGGER_GLOBAL_FIBER) drv_data->work.handler = bma280_fiber_cb; drv_data->work.arg = dev; #endif gpio_pin_enable_callback(drv_data->gpio, CONFIG_BMA280_GPIO_PIN_NUM); return 0; }
/* Prepare an event logger: its record ring buffer plus the sync semaphore. */
void sys_event_logger_init(struct event_logger *logger,
			   uint32_t *logger_buffer, uint32_t buffer_size)
{
	sys_ring_buf_init(&logger->ring_buf, buffer_size, logger_buffer);
	nano_sem_init(&logger->sync_sema);
}
/* The timeout test entry point.
 *
 * Exercises nano_task_sem_take_wait_timeout() with: an expiring timeout,
 * a zero timeout, a fiber that gives the semaphore in time, the special
 * values TICKS_NONE and TICKS_UNLIMITED (from both task and fiber
 * context), and finally multiple fibers pending with staggered timeouts.
 * Returns TC_PASS or TC_FAIL. The strict ordering of the steps matters:
 * each step leaves the semaphores/FIFO in the state the next one expects.
 */
static int test_timeout(void)
{
	int64_t orig_ticks;
	int32_t timeout;
	int rv;
	int test_data_size;
	struct reply_packet reply_packet;

	nano_sem_init(&sem_timeout[0]);
	nano_sem_init(&sem_timeout[1]);
	nano_fifo_init(&timeout_order_fifo);

	/* test nano_task_sem_take_wait_timeout() with timeout */
	timeout = 10;
	orig_ticks = nano_tick_get();
	rv = nano_task_sem_take_wait_timeout(&sem_timeout[0], timeout);
	if (rv) {
		TC_ERROR(" *** timeout of %d did not time out.\n", timeout);
		return TC_FAIL;
	}
	if ((nano_tick_get() - orig_ticks) < timeout) {
		TC_ERROR(" *** task did not wait long enough on timeout of %d.\n",
			 timeout);
		return TC_FAIL;
	}

	/* test nano_task_sem_take_wait_timeout with timeout of 0 */
	rv = nano_task_sem_take_wait_timeout(&sem_timeout[0], 0);
	if (rv) {
		TC_ERROR(" *** timeout of 0 did not time out.\n");
		return TC_FAIL;
	}

	/* test nano_task_sem_take_wait_timeout with timeout > 0 */
	TC_PRINT("test nano_task_sem_take_wait_timeout with timeout > 0\n");
	timeout = 3;
	orig_ticks = nano_tick_get();
	rv = nano_task_sem_take_wait_timeout(&sem_timeout[0], timeout);
	if (rv) {
		TC_ERROR(" *** timeout of %d did not time out.\n", timeout);
		return TC_FAIL;
	}
	if (!is_timeout_in_range(orig_ticks, timeout)) {
		return TC_FAIL;
	}
	TC_PRINT("nano_task_sem_take_wait_timeout timed out as expected\n");

	/*
	 * test nano_task_sem_take_wait_timeout with a timeout and fiber that
	 * gives the semaphore on time
	 */
	timeout = 5;
	orig_ticks = nano_tick_get();
	task_fiber_start(timeout_stacks[0], FIBER_STACKSIZE,
			 test_fiber_give_timeout, (int)&sem_timeout[0],
			 timeout, FIBER_PRIORITY, 0);
	rv = nano_task_sem_take_wait_timeout(&sem_timeout[0],
					     (int)(timeout + 5));
	if (!rv) {
		TC_ERROR(" *** timed out even if semaphore was given in time.\n");
		return TC_FAIL;
	}
	if (!is_timeout_in_range(orig_ticks, timeout)) {
		return TC_FAIL;
	}
	TC_PRINT("nano_task_sem_take_wait_timeout got sem in time, as expected\n");

	/*
	 * test nano_task_sem_take_wait_timeout with TICKS_NONE and the
	 * semaphore unavailable.
	 */
	if (nano_task_sem_take_wait_timeout(&sem_timeout[0], TICKS_NONE)) {
		TC_ERROR("task with TICKS_NONE got sem, but shouldn't have\n");
		return TC_FAIL;
	}
	TC_PRINT("task with TICKS_NONE did not get sem, as expected\n");

	/*
	 * test nano_task_sem_take_wait_timeout with TICKS_NONE and the
	 * semaphore available.
	 */
	nano_task_sem_give(&sem_timeout[0]);
	if (!nano_task_sem_take_wait_timeout(&sem_timeout[0], TICKS_NONE)) {
		TC_ERROR("task with TICKS_NONE did not get available sem\n");
		return TC_FAIL;
	}
	TC_PRINT("task with TICKS_NONE got available sem, as expected\n");

	/*
	 * test nano_task_sem_take_wait_timeout with TICKS_UNLIMITED and the
	 * semaphore available.
	 */
	TC_PRINT("Trying to take available sem with TICKS_UNLIMITED:\n"
		 " will hang the test if it fails.\n");
	nano_task_sem_give(&sem_timeout[0]);
	if (!nano_task_sem_take_wait_timeout(&sem_timeout[0], TICKS_UNLIMITED)) {
		TC_ERROR(" *** This will never be hit!!! .\n");
		return TC_FAIL;
	}
	TC_PRINT("task with TICKS_UNLIMITED got available sem, as expected\n");

	/* test fiber with timeout of TICKS_NONE not getting empty semaphore */
	task_fiber_start(timeout_stacks[0], FIBER_STACKSIZE,
			 test_fiber_ticks_special_values,
			 (int)&reply_packet, TICKS_NONE, FIBER_PRIORITY, 0);
	if (!nano_task_fifo_get(&timeout_order_fifo)) {
		TC_ERROR(" *** fiber should have run and filled the fifo.\n");
		return TC_FAIL;
	}
	if (reply_packet.reply != 0) {
		TC_ERROR(" *** fiber should not have obtained the semaphore.\n");
		return TC_FAIL;
	}
	TC_PRINT("fiber with TICKS_NONE did not get sem, as expected\n");

	/* test fiber with timeout of TICKS_NONE getting full semaphore */
	nano_task_sem_give(&sem_timeout[0]);
	task_fiber_start(timeout_stacks[0], FIBER_STACKSIZE,
			 test_fiber_ticks_special_values,
			 (int)&reply_packet, TICKS_NONE, FIBER_PRIORITY, 0);
	if (!nano_task_fifo_get(&timeout_order_fifo)) {
		TC_ERROR(" *** fiber should have run and filled the fifo.\n");
		return TC_FAIL;
	}
	if (reply_packet.reply != 1) {
		TC_ERROR(" *** fiber should have obtained the semaphore.\n");
		return TC_FAIL;
	}
	TC_PRINT("fiber with TICKS_NONE got available sem, as expected\n");

	/* test fiber with timeout of TICKS_UNLIMITED getting full semaphore */
	nano_task_sem_give(&sem_timeout[0]);
	task_fiber_start(timeout_stacks[0], FIBER_STACKSIZE,
			 test_fiber_ticks_special_values,
			 (int)&reply_packet, TICKS_UNLIMITED, FIBER_PRIORITY, 0);
	if (!nano_task_fifo_get(&timeout_order_fifo)) {
		TC_ERROR(" *** fiber should have run and filled the fifo.\n");
		return TC_FAIL;
	}
	if (reply_packet.reply != 1) {
		TC_ERROR(" *** fiber should have obtained the semaphore.\n");
		return TC_FAIL;
	}
	TC_PRINT("fiber with TICKS_UNLIMITED got available sem, as expected\n");

	/* test multiple fibers pending on the same sem with different timeouts */
	test_data_size = ARRAY_SIZE(timeout_order_data);
	TC_PRINT("testing timeouts of %d fibers on same sem\n", test_data_size);
	rv = test_multiple_fibers_pending(timeout_order_data, test_data_size);
	if (rv != TC_PASS) {
		TC_ERROR(" *** fibers did not time out in the right order\n");
		return TC_FAIL;
	}

	/* test multiple fibers pending on different sems with different timeouts */
	test_data_size = ARRAY_SIZE(timeout_order_data_mult_sem);
	TC_PRINT("testing timeouts of %d fibers on different sems\n",
		 test_data_size);
	rv = test_multiple_fibers_pending(timeout_order_data_mult_sem,
					  test_data_size);
	if (rv != TC_PASS) {
		TC_ERROR(" *** fibers did not time out in the right order\n");
		return TC_FAIL;
	}

	/*
	 * test multiple fibers pending on same sem with different timeouts, but
	 * getting the semaphore in time, except the last one.
	 */
	test_data_size = ARRAY_SIZE(timeout_order_data);
	TC_PRINT("testing %d fibers timing out, but obtaining the sem in time\n"
		 "(except the last one, which times out)\n", test_data_size);
	rv = test_multiple_fibers_get_sem(timeout_order_data, test_data_size);
	if (rv != TC_PASS) {
		TC_ERROR(" *** fibers did not get the sem in the right order\n");
		return TC_FAIL;
	}

	return TC_PASS;
}