/*
 * Stop a nanokernel timer without triggering a context switch.
 *
 * Aborts the timer's pending timeout and, when a thread was blocked
 * waiting on this timer, makes that thread ready to run — but does NOT
 * schedule it (non-preemptible variant).
 */
void _timer_stop_non_preemptible(struct nano_timer *timer)
{
	struct _nano_timeout *t = &timer->timeout_data;
	struct tcs *waiter = t->tcs;
	int key = irq_lock();

	/*
	 * Ready the waiter only when all three hold: it is not pended on
	 * some other object, the timeout was successfully aborted (i.e.
	 * it had not already expired), and a thread was in fact waiting.
	 */
	if (!t->wait_q && (_nano_timer_timeout_abort(t) == 0) &&
	    (waiter != NULL)) {
		if (!_IS_MICROKERNEL_TASK(waiter)) {
			_nano_fiber_ready(waiter);
		} else {
			_NANO_TIMER_TASK_READY(waiter);
		}
	}

	/*
	 * Once the timer is aborted, nano_timer_test() must report NULL
	 * until the timer is restarted.
	 */
	timer->user_data = NULL;

	irq_unlock(key);
}
/*
 * Wake up the given fiber, readying it to run; no reschedule occurs.
 * A fiber that is pended on an object, or whose timeout could not be
 * aborted, is left untouched.
 */
void _fiber_wakeup(nano_thread_id_t fiber)
{
	int key = irq_lock();

	/* leave fibers alone that wait on an object or failed the abort */
	if (fiber->nano_timeout.wait_q ||
	    (_nano_timeout_abort(fiber) != 0)) {
		irq_unlock(key);
		return;
	}

	_nano_fiber_ready(fiber);
	irq_unlock(key);
}
/*
 * Task-context fiber wakeup: ready the fiber and immediately swap to
 * the scheduler so it can run. A fiber pended on an object, or whose
 * timeout abort fails, is left untouched.
 */
void task_fiber_wakeup(nano_thread_id_t fiber)
{
	int key = irq_lock();

	/* only wake a fiber not pended on an object, with an abortable
	 * timeout
	 */
	if (!fiber->nano_timeout.wait_q &&
	    (_nano_timeout_abort(fiber) >= 0)) {
		_nano_fiber_ready(fiber);
		_Swap(key);
	} else {
		irq_unlock(key);
	}
}
/*
 * Yield the processor to another runnable fiber of equal or higher
 * urgency; a no-op when no such fiber exists.
 */
void fiber_yield(void)
{
	unsigned int key = irq_lock();

	/* nothing to yield to: no runnable fiber, or the head of the
	 * fiber list does not outrank (or tie) the current thread
	 */
	if ((_nanokernel.fiber == (struct tcs *)NULL) ||
	    (_nanokernel.current->prio < _nanokernel.fiber->prio)) {
		irq_unlock(key);
		return;
	}

	/*
	 * Requeue the current thread among the runnable threads, then
	 * swap to the fiber at the head of the fiber list.
	 */
	_nano_fiber_ready(_nanokernel.current);
	_Swap(key);
}
/*
 * Initialize and launch a new fiber.
 *
 * Carves a thread control structure out of the supplied stack memory,
 * makes the new fiber runnable, and — when the caller is the background
 * task — context-switches into it right away. A fiber caller simply
 * returns with the new fiber queued.
 */
void _fiber_start(char *pStack,
		  unsigned stackSize, /* stack size in bytes */
		  nano_fiber_entry_t pEntry,
		  int parameter1,
		  int parameter2,
		  unsigned priority,
		  unsigned options)
{
	struct tcs *new_fiber = (struct tcs *)pStack;
	unsigned int key;

	_new_thread(pStack, stackSize, (_thread_entry_t)pEntry,
		    (void *)parameter1, (void *)parameter2, (void *)0,
		    priority, options);

	/*
	 * _new_thread() already derived the thread flags from the
	 * 'options' and 'priority' arguments; nothing more to set here.
	 */

	/* interrupts locked to protect the runnable fiber list */
	key = irq_lock();

	/* the freshly crafted TCS becomes a runnable fiber */
	_nano_fiber_ready(new_fiber);

	/*
	 * A task caller must swap into the new fiber immediately; a
	 * fiber caller just returns to let the scheduler decide later.
	 */
	if ((_nanokernel.current->flags & TASK) == TASK) {
		_Swap(key);
	} else {
		irq_unlock(key);
	}
}
/** * * @brief Push data onto a stack (no context switch) * * This routine pushes a data item onto a stack object; it may be called from * either a fiber or ISR context. A fiber pending on the stack object will be * made ready, but will NOT be scheduled to execute. * * @param stack Stack on which to interact * @param data Data to push on stack * @return N/A * * @internal * This function is capable of supporting invocations from both a fiber and an * ISR context. However, the nano_isr_stack_push and nano_fiber_stack_push * aliases are created to support any required implementation differences in * the future without introducing a source code migration issue. * @endinternal */ void _stack_push_non_preemptible(struct nano_stack *stack, uint32_t data) { struct tcs *tcs; unsigned int imask; imask = irq_lock(); tcs = stack->fiber; if (tcs) { stack->fiber = 0; fiberRtnValueSet(tcs, data); _nano_fiber_ready(tcs); } else { *(stack->next) = data; stack->next++; } irq_unlock(imask); }
void nano_task_stack_push(struct nano_stack *stack, uint32_t data) { struct tcs *tcs; unsigned int imask; imask = irq_lock(); tcs = stack->fiber; if (tcs) { stack->fiber = 0; fiberRtnValueSet(tcs, data); _nano_fiber_ready(tcs); _Swap(imask); return; } *(stack->next) = data; stack->next++; irq_unlock(imask); }
/*
 * Task-context timer stop: abort the timer and, if a fiber was blocked
 * waiting on it, ready that fiber and reschedule immediately. A waiting
 * microkernel task is readied via its dedicated macro without a swap.
 */
void nano_task_timer_stop(struct nano_timer *timer)
{
	struct _nano_timeout *t = &timer->timeout_data;
	struct tcs *waiter = t->tcs;
	int key = irq_lock();

	/*
	 * After the timer is stopped, nano_timer_test() must return NULL
	 * until the timer is restarted.
	 */
	timer->user_data = NULL;

	/*
	 * Wake the waiter only when all three hold: it is not pended on
	 * some other object, the timeout was successfully aborted, and a
	 * thread was in fact waiting on this timer.
	 */
	if (!t->wait_q && (_nano_timer_timeout_abort(t) == 0) &&
	    (waiter != NULL)) {
		if (_IS_MICROKERNEL_TASK(waiter)) {
			_TASK_NANO_TIMER_TASK_READY(waiter);
		} else {
			/* fiber waiter: ready it and reschedule now */
			_nano_fiber_ready(waiter);
			_Swap(key);
			return;
		}
	}

	irq_unlock(key);
}