/**
 * Set a timer.
 *
 * This function is used to set a timer for a time sometime in the
 * future. The function timer_expired() will evaluate to true after
 * the timer has expired.
 *
 * \param t A pointer to the timer
 * \param interval The interval before the timer expires.
 *
 */
void timer_set(struct timer *t, clock_time_t interval)
{
	do_init(t);

	/* Re-arming a running timer: cancel the pending expiry first. */
	if (t->started) {
		timer_stop(t);
	}

	switch (sys_execution_context_type_get()) {
	case NANO_CTX_FIBER:
		nano_fiber_timer_start(&t->nano_timer, interval);
		break;
	case NANO_CTX_TASK:
		nano_task_timer_start(&t->nano_timer, interval);
		break;
	default:
		/* Neither fiber nor task context: do not arm the timer. */
		return;
	}

	PRINTF("%s():%d timer %p started interval %d\n",
	       __FUNCTION__, __LINE__, t, interval);

	t->interval = interval;
	t->start = clock_time();
	t->started = true;
	t->triggered = false;
}
int nanoCtxTaskTest(void) { nano_thread_id_t self_thread_id; TC_PRINT("Testing sys_thread_self_get() from an ISR and task\n"); self_thread_id = sys_thread_self_get(); isrInfo.command = THREAD_SELF_CMD; isrInfo.error = 0; _trigger_isrHandler(); if ((isrInfo.error != 0) || (isrInfo.data != (void *) self_thread_id)) { /* * Either the ISR detected an error, or the ISR context ID does not * match the interrupted task's thread ID. */ return TC_FAIL; } TC_PRINT("Testing sys_execution_context_type_get() from an ISR\n"); isrInfo.command = EXEC_CTX_TYPE_CMD; isrInfo.error = 0; _trigger_isrHandler(); if ((isrInfo.error != 0) || (isrInfo.value != NANO_CTX_ISR)) { return TC_FAIL; } TC_PRINT("Testing sys_execution_context_type_get() from a task\n"); if (sys_execution_context_type_get() != NANO_CTX_TASK) { return TC_FAIL; } return TC_PASS; }
void nano_timer_stop(struct nano_timer *timer)
{
	/* One stop routine per execution context (table order matches the
	 * context type returned by sys_execution_context_type_get()).
	 */
	static void (*const stop_fn[3])(struct nano_timer *) = {
		nano_isr_timer_stop,
		nano_fiber_timer_stop,
		nano_task_timer_stop,
	};
	void (*fn)(struct nano_timer *) =
		stop_fn[sys_execution_context_type_get()];

	fn(timer);
}
void *nano_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks)
{
	/* Context-indexed dispatch: ISR, fiber, and task variants. */
	static void *(*const test_fn[3])(struct nano_timer *, int32_t) = {
		nano_isr_timer_test,
		nano_fiber_timer_test,
		nano_task_timer_test,
	};
	void *(*fn)(struct nano_timer *, int32_t) =
		test_fn[sys_execution_context_type_get()];

	return fn(timer, timeout_in_ticks);
}
void nano_lifo_put(struct nano_lifo *lifo, void *data)
{
	/* Context-indexed dispatch: ISR, fiber, and task variants. */
	static void (*const put_fn[3])(struct nano_lifo *, void *) = {
		nano_isr_lifo_put,
		nano_fiber_lifo_put,
		nano_task_lifo_put
	};
	void (*fn)(struct nano_lifo *, void *) =
		put_fn[sys_execution_context_type_get()];

	fn(lifo, data);
}
void fiber_wakeup(nano_thread_id_t fiber)
{
	/* Context-indexed dispatch: ISR, fiber, and task variants. */
	static void (*const wakeup_fn[3])(nano_thread_id_t) = {
		isr_fiber_wakeup,
		fiber_fiber_wakeup,
		task_fiber_wakeup
	};
	void (*fn)(nano_thread_id_t) =
		wakeup_fn[sys_execution_context_type_get()];

	fn(fiber);
}
int nano_stack_pop(struct nano_stack *stack, uint32_t *pData,
		   int32_t timeout_in_ticks)
{
	/* Context-indexed dispatch: ISR, fiber, and task variants. */
	static int (*const pop_fn[3])(struct nano_stack *, uint32_t *,
				      int32_t) = {
		nano_isr_stack_pop,
		nano_fiber_stack_pop,
		nano_task_stack_pop,
	};
	int (*fn)(struct nano_stack *, uint32_t *, int32_t) =
		pop_fn[sys_execution_context_type_get()];

	return fn(stack, pData, timeout_in_ticks);
}
void nano_stack_push(struct nano_stack *stack, uint32_t data)
{
	/* Context-indexed dispatch: ISR, fiber, and task variants. */
	static void (*const push_fn[3])(struct nano_stack *, uint32_t) = {
		nano_isr_stack_push,
		nano_fiber_stack_push,
		nano_task_stack_push
	};
	void (*fn)(struct nano_stack *, uint32_t) =
		push_fn[sys_execution_context_type_get()];

	fn(stack, data);
}
void *nano_lifo_get(struct nano_lifo *lifo, int32_t timeout)
{
	/* Context-indexed dispatch: ISR, fiber, and task variants. */
	static void *(*const get_fn[3])(struct nano_lifo *, int32_t) = {
		nano_isr_lifo_get,
		nano_fiber_lifo_get,
		nano_task_lifo_get
	};
	void *(*fn)(struct nano_lifo *, int32_t) =
		get_fn[sys_execution_context_type_get()];

	return fn(lifo, timeout);
}
/*********************
 * Generic functions *
 *********************/
/*
 * Sleep (or busy-wait) for approximately @usec microseconds.
 *
 * In ISR context sleeping is impossible, so the delay is a busy-wait.
 * In fiber/task context the microsecond count is converted to ticks
 * (rounded up to at least 1 tick) and the context-appropriate sleep
 * routine is invoked.
 */
static void _usleep(uint32_t usec)
{
	/* Index 0 (ISR) is never dereferenced: ISRs busy-wait instead. */
	static void (*const sleep_fn[3])(int32_t timeout_in_ticks) = {
		NULL,
		fiber_sleep,
		task_sleep,
	};
	/* Fetch the context type once; use the named constant rather than
	 * the magic value 0 for the ISR case, matching the rest of the
	 * codebase.
	 */
	int ctx = sys_execution_context_type_get();

	if (ctx == NANO_CTX_ISR) {
		sys_thread_busy_wait(usec);
		return;
	}

	/* Timeout in ticks: */
	usec = USEC(usec);
	/*
	 * Most likely usec will generate 0 ticks, so set it to at least 1.
	 */
	if (!usec) {
		usec = 1;
	}

	sleep_fn[ctx](usec);
}
/* Give @chan using the semaphore-give variant matching the caller's
 * execution context; ISR (and any unknown) context is a no-op.
 */
static void context_sem_give(struct nano_sem *chan)
{
	switch (sys_execution_context_type_get()) {
	case NANO_CTX_TASK:
		nano_task_sem_give(chan);
		break;
	case NANO_CTX_FIBER:
		nano_fiber_sem_give(chan);
		break;
	case NANO_CTX_ISR:
	default:
		/* Invalid context type */
		break;
	}
}
void isr_handler(void *data) { ARG_UNUSED(data); switch (isrInfo.command) { case THREAD_SELF_CMD: isrInfo.data = (void *) sys_thread_self_get(); break; case EXEC_CTX_TYPE_CMD: isrInfo.value = sys_execution_context_type_get(); break; default: isrInfo.error = UNKNOWN_COMMAND; break; } }
/**
 * Restart the timer from the current point in time
 *
 * This function restarts a timer with the same interval that was
 * given to the timer_set() function. The timer will start at the
 * current time.
 *
 * \note A periodic timer will drift if this function is used to reset
 * it. For periodic timers, use the timer_reset() function instead.
 *
 * \param t A pointer to the timer.
 *
 * \sa timer_reset()
 */
void timer_restart(struct timer *t)
{
	do_init(t);

	/* Cancel any pending expiry before re-arming with the same interval. */
	if (t->started) {
		timer_stop(t);
	}

	switch (sys_execution_context_type_get()) {
	case NANO_CTX_FIBER:
		nano_fiber_timer_start(&t->nano_timer, t->interval);
		break;
	case NANO_CTX_TASK:
		nano_task_timer_start(&t->nano_timer, t->interval);
		break;
	default:
		/* Neither fiber nor task context: leave the timer stopped. */
		return;
	}

	t->started = true;
	t->start = clock_time();
	t->triggered = false;
}
/*---------------------------------------------------------------------------*/ bool timer_stop(struct timer *t) { if (!t->started) { return false; } switch (sys_execution_context_type_get()) { case NANO_CTX_FIBER: nano_fiber_timer_stop(&t->nano_timer); break; case NANO_CTX_TASK: nano_task_timer_stop(&t->nano_timer); break; default: return false; } PRINTF("%s():%d timer %p stopped\n", __FUNCTION__, __LINE__, t); t->started = false; t->triggered = false; return true; }
/** * * @brief Fatal error handler * * This routine implements the corrective action to be taken when the system * detects a fatal error. * * This sample implementation attempts to abort the current thread and allow * the system to continue executing, which may permit the system to continue * functioning with degraded capabilities. * * System designers may wish to enhance or substitute this sample * implementation to take other actions, such as logging error (or debug) * information to a persistent repository and/or rebooting the system. * * @param reason the fatal error reason * @param pEsf the pointer to the exception stack frame * * @return This function does not return. */ FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF * pEsf) { nano_context_type_t curCtx = sys_execution_context_type_get(); ARG_UNUSED(reason); ARG_UNUSED(pEsf); if ((curCtx != NANO_CTX_ISR) && !_is_thread_essential(NULL)) { #ifdef CONFIG_MICROKERNEL if (curCtx == NANO_CTX_TASK) { extern FUNC_NORETURN void _TaskAbort(void); PRINTK("Fatal task error! Aborting task.\n"); _TaskAbort(); } else #endif /* CONFIG_MICROKERNEL */ { PRINTK("Fatal fiber error! Aborting fiber.\n"); fiber_abort(); } } else { #ifdef CONFIG_PRINTK /* * Conditionalize the ctxText[] definition to prevent an "unused * variable" warning when the PRINTK kconfig option is disabled. */ static const char * const ctxText[] = {"ISR", "essential fiber", "essential task"}; PRINTK("Fatal %s error! Spinning...\n", ctxText[curCtx]); #endif /* CONFIG_PRINTK */ } do { } while (1); }
int nanoCtxFiberTest(nano_thread_id_t task_thread_id)
{
	nano_thread_id_t my_thread_id = sys_thread_self_get();

	/* The fiber's thread ID must differ from the task's. */
	if (my_thread_id == task_thread_id) {
		fiberDetectedError = 1;
		return TC_FAIL;
	}

	isrInfo.command = THREAD_SELF_CMD;
	isrInfo.error = 0;
	_trigger_isrHandler();

	if (isrInfo.error != 0 || isrInfo.data != (void *) my_thread_id) {
		/*
		 * Either the ISR detected an error, or the ISR context ID does not
		 * match the interrupted fiber's thread ID.
		 */
		fiberDetectedError = 2;
		return TC_FAIL;
	}

	isrInfo.command = EXEC_CTX_TYPE_CMD;
	isrInfo.error = 0;
	_trigger_isrHandler();

	if (isrInfo.error != 0 || isrInfo.value != NANO_CTX_ISR) {
		fiberDetectedError = 3;
		return TC_FAIL;
	}

	if (sys_execution_context_type_get() != NANO_CTX_FIBER) {
		fiberDetectedError = 4;
		return TC_FAIL;
	}

	return TC_PASS;
}
/*
 * Bring up the NBLE controller: drive the reset/wake GPIO handshake, open
 * the transport, and register the caller's ready callback.
 *
 * Returns 0 on success, -ENODEV when the GPIO device or pin configuration
 * fails, -EINVAL on pin-write failure or when called from ISR context, or
 * the error from nble_open().
 *
 * NOTE(review): statement order matters throughout — each GPIO step is part
 * of the chip's reset timing sequence; do not reorder.
 */
int bt_enable(bt_ready_cb_t cb)
{
	struct device *gpio;
	int ret;

	BT_DBG("");

	gpio = device_get_binding(CONFIG_GPIO_DW_0_NAME);
	if (!gpio) {
		BT_ERR("Cannot find %s", CONFIG_GPIO_DW_0_NAME);
		return -ENODEV;
	}

	ret = gpio_pin_configure(gpio, NBLE_RESET_PIN, GPIO_DIR_OUT);
	if (ret) {
		BT_ERR("Error configuring pin %d", NBLE_RESET_PIN);
		return -ENODEV;
	}

	/* Reset hold time is 0.2us (normal) or 100us (SWD debug) */
	ret = gpio_pin_write(gpio, NBLE_RESET_PIN, 0);
	if (ret) {
		BT_ERR("Error pin write %d", NBLE_RESET_PIN);
		return -EINVAL;
	}

	/* Assert the wake pin while the chip is held in reset. */
	ret = gpio_pin_configure(gpio, NBLE_BTWAKE_PIN, GPIO_DIR_OUT);
	if (ret) {
		BT_ERR("Error configuring pin %d", NBLE_BTWAKE_PIN);
		return -ENODEV;
	}

	ret = gpio_pin_write(gpio, NBLE_BTWAKE_PIN, 1);
	if (ret) {
		BT_ERR("Error pin write %d", NBLE_BTWAKE_PIN);
		return -EINVAL;
	}

	/**
	 * NBLE reset is achieved by asserting low the SWDIO pin.
	 * However, the BLE Core chip can be in SWD debug mode,
	 * and NRF_POWER->RESET = 0 due to, other constraints: therefore,
	 * this reset might not work everytime, especially after
	 * flashing or debugging.
	 */

	/* sleep 1ms depending on context */
	switch (sys_execution_context_type_get()) {
	case NANO_CTX_FIBER:
		fiber_sleep(MSEC(1));
		break;
	case NANO_CTX_TASK:
		task_sleep(MSEC(1));
		break;
	default:
		BT_ERR("ISR context is not supported");
		return -EINVAL;
	}

	ret = nble_open();
	if (ret) {
		return ret;
	}

	/* Release reset: let the controller boot. */
	ret = gpio_pin_write(gpio, NBLE_RESET_PIN, 1);
	if (ret) {
		BT_ERR("Error pin write %d", NBLE_RESET_PIN);
		return -EINVAL;
	}

	/* Set back GPIO to input to avoid interfering with external debugger */
	ret = gpio_pin_configure(gpio, NBLE_RESET_PIN, GPIO_DIR_IN);
	if (ret) {
		BT_ERR("Error configuring pin %d", NBLE_RESET_PIN);
		return -ENODEV;
	}

	bt_ready_cb = cb;

	return 0;
}