void *nano_fiber_lifo_get_wait_timeout(struct nano_lifo *lifo, int32_t timeout_in_ticks) { unsigned int key = irq_lock_inline(); void *data; if (!lifo->list) { if (unlikely(TICKS_NONE == timeout_in_ticks)) { irq_unlock_inline(key); return NULL; } if (likely(timeout_in_ticks != TICKS_UNLIMITED)) { _nano_timeout_add(_nanokernel.current, &lifo->wait_q, timeout_in_ticks); } _nano_wait_q_put(&lifo->wait_q); data = (void *)_Swap(key); } else { data = lifo->list; lifo->list = *(void **)data; irq_unlock_inline(key); } return data; }
/**
 * INTERNAL
 *
 * Fiber-context semaphore take: consume a pending signal if one exists,
 * otherwise pend on the semaphore's wait queue until signalled.
 *
 * A separate nano_task_sem_take_wait() implementation exists because a
 * task context cannot pend on a nanokernel object; tasks poll the
 * semaphore object instead.
 */
void nano_fiber_sem_take_wait(struct nano_sem *sem)
{
	unsigned int key = irq_lock_inline();

	if (sem->nsig != 0) {
		/* A signal is available: consume it without blocking. */
		sem->nsig--;
		irq_unlock_inline(key);
	} else {
		/* No signal pending: pend and context-switch away. */
		_nano_wait_q_put(&sem->wait_q);
		_Swap(key);
	}
}
/** INTERNAL * * There exists a separate nano_task_lifo_get_wait() implementation since a * task context cannot pend on a nanokernel object. Instead, tasks will poll * the lifo object. */ void *nano_fiber_lifo_get_wait(struct nano_lifo *lifo ) { void *data; unsigned int imask; imask = irq_lock_inline(); if (!lifo->list) { _nano_wait_q_put(&lifo->wait_q); data = (void *) _Swap(imask); } else { data = lifo->list; lifo->list = *(void **) data; irq_unlock_inline(imask); } return data; }
/**
 * INTERNAL
 *
 * Common LIFO get with timeout: return the head element when one is
 * available. When the LIFO is empty, return NULL immediately if
 * timeout_in_ticks is TICKS_NONE; otherwise arm the (possibly unlimited)
 * timeout, pend on the wait queue, and return the value produced by
 * _Swap() on wakeup.
 */
void *_lifo_get(struct nano_lifo *lifo, int32_t timeout_in_ticks)
{
	void *item = NULL;
	unsigned int key = irq_lock();

	if (likely(lifo->list != NULL)) {
		/* Fast path: unlink the head element. */
		item = lifo->list;
		lifo->list = *(void **)item;
	} else if (timeout_in_ticks != TICKS_NONE) {
		/* Arm the timeout (no-op for TICKS_UNLIMITED per the macro),
		 * pend, and return whatever the wakeup delivers.
		 */
		_NANO_TIMEOUT_ADD(&lifo->wait_q, timeout_in_ticks);
		_nano_wait_q_put(&lifo->wait_q);
		return (void *)_Swap(key);
	}

	irq_unlock(key);
	return item;
}
/**
 * INTERNAL
 *
 * Fiber-context semaphore take with timeout: consume a pending signal and
 * return 1 when one is available. Otherwise return 0 immediately if
 * timeout_in_ticks is TICKS_NONE; else pend (with a timeout armed unless
 * TICKS_UNLIMITED) and return the value produced by _Swap() on wakeup.
 */
int nano_fiber_sem_take_wait_timeout(struct nano_sem *sem,
				     int32_t timeout_in_ticks)
{
	unsigned int imask = irq_lock_inline();

	if (sem->nsig != 0) {
		/* A signal is available: take it without blocking. */
		sem->nsig--;
		irq_unlock_inline(imask);
		return 1;
	}

	if (unlikely(timeout_in_ticks == TICKS_NONE)) {
		/* Caller refuses to wait: fail immediately. */
		irq_unlock_inline(imask);
		return 0;
	}

	if (likely(timeout_in_ticks != TICKS_UNLIMITED)) {
		/* Arm a wakeup in case no signal arrives in time. */
		_nano_timeout_add(_nanokernel.current, &sem->wait_q,
				  timeout_in_ticks);
	}

	/* Pend on the semaphore and context-switch away. */
	_nano_wait_q_put(&sem->wait_q);
	return _Swap(imask);
}