Example #1
void *nano_task_lifo_get_wait(struct nano_lifo *lifo)
{
	void *data;
	unsigned int imask;

	/* spin until data is put onto the LIFO */

	while (1) {
		imask = irq_lock_inline();

		/*
		 * Predict that the branch will be taken to break out of the loop.
		 * There is little cost to a misprediction since that leads to idle.
		 */

		if (likely(lifo->list))
			break;

		/* see explanation in nano_stack.c:nano_task_stack_pop_wait() */

		nano_cpu_atomic_idle(imask);
	}

	data = lifo->list;
	lifo->list = *(void **) data;

	irq_unlock_inline(imask);

	return data;
}
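The pop above treats the LIFO as an intrusive singly linked list: lifo->list points at the most recently added item, and the first word of every queued item holds the link to the next one. For contrast, below is a minimal sketch of the matching push under the same interrupt-lock discipline; the helper name is hypothetical, and the real put routines additionally hand the item directly to a fiber that may already be waiting, which is omitted here.

/* Hypothetical sketch: push an item whose first word is reserved for the link. */
static void lifo_push_sketch(struct nano_lifo *lifo, void *data)
{
	unsigned int imask = irq_lock_inline();

	*(void **) data = lifo->list;	/* link the new item to the old head */
	lifo->list = data;		/* the new item becomes the head */

	irq_unlock_inline(imask);
}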
Example #2
void nano_task_sem_take_wait(struct nano_sem *sem)
{
	unsigned int imask;

	/* spin until the semaphore is signaled */

	while (1) {
		imask = irq_lock_inline();

		/*
		 * Predict that the branch will be taken to break out of the loop.
		 * There is little cost to a misprediction since that leads to idle.
		 */

		if (likely(sem->nsig > 0))
			break;

		/* see explanation in nano_stack.c:nano_task_stack_pop_wait() */

		nano_cpu_atomic_idle(imask);
	}

	sem->nsig--;
	irq_unlock_inline(imask);
}
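sem->nsig is simply a count of pending signals, which the take above spins on and then decrements. Below is a minimal sketch of the corresponding give under the same locking discipline; the helper name is hypothetical, and the real give routines also pass the signal straight to a waiting fiber, which is omitted here.

/* Hypothetical sketch: signal the semaphore by bumping its pending count. */
static void sem_give_sketch(struct nano_sem *sem)
{
	unsigned int imask = irq_lock_inline();

	sem->nsig++;	/* one more signal is now available to a taker */

	irq_unlock_inline(imask);
}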
Example #3
int nano_task_stack_pop(struct nano_stack *stack, uint32_t *pData, int32_t timeout_in_ticks)
{
	unsigned int imask;

	imask = irq_lock();

	while (1) {
		/*
		 * Predict that the branch will be taken to break out of the
		 * loop.  There is little cost to a misprediction since that
		 * leads to idle.
		 */

		if (likely(stack->next > stack->base)) {
			stack->next--;
			*pData = *(stack->next);
			irq_unlock(imask);
			return 1;
		}

		if (timeout_in_ticks == TICKS_NONE) {
			break;
		}

		/*
		 * Invoke nano_cpu_atomic_idle() with interrupts still disabled
		 * to prevent the scenario where an interrupt fires after
		 * re-enabling interrupts and before executing the "halt"
		 * instruction.  If the ISR performs a nano_isr_stack_push() on
		 * the same stack object, the subsequent execution of the "halt"
		 * instruction will result in the queued data being ignored
		 * until the next interrupt, if any.
		 *
	 * Thus it should be clear that an architecture's implementation
		 * of nano_cpu_atomic_idle() must be able to atomically
		 * re-enable interrupts and enter a low-power mode.
		 *
		 * This explanation is valid for all nanokernel objects: stacks,
		 * FIFOs, LIFOs, and semaphores, for their
		 * nano_task_<object>_<get>() routines.
		 */

		nano_cpu_atomic_idle(imask);
		imask = irq_lock();
	}

	irq_unlock(imask);
	return 0;
}
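The long comment above is the contract that makes this loop safe: nano_cpu_atomic_idle() must re-enable interrupts and halt as one atomic step, so an ISR that pushes onto the stack cannot fire in the gap and leave the CPU halted on non-empty data. On x86 this is typically achieved with an sti immediately followed by hlt, because sti does not take effect until after the next instruction. The sketch below only illustrates that idea; it is not the literal Zephyr implementation, and restoring the lockout state described by imask before returning is omitted.

/*
 * Hypothetical x86 sketch of the atomic "re-enable and halt" step.
 * "sti" defers interrupt recognition until the following instruction
 * has executed, so no interrupt can be accepted between "sti" and
 * "hlt": the CPU always halts first and is then woken by the ISR.
 */
static inline void atomic_idle_sketch(unsigned int imask)
{
	(void)imask;	/* restoring the caller's lockout state is omitted */

	__asm__ volatile ("sti; hlt" ::: "memory");
}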
Example #4
void *nano_task_lifo_get_wait_timeout(struct nano_lifo *lifo,
		int32_t timeout_in_ticks)
{
	int64_t cur_ticks, limit;
	unsigned int key;
	void *data;

	if (unlikely(TICKS_UNLIMITED == timeout_in_ticks)) {
		return nano_task_lifo_get_wait(lifo);
	}

	if (unlikely(TICKS_NONE == timeout_in_ticks)) {
		return nano_task_lifo_get(lifo);
	}

	key = irq_lock_inline();
	cur_ticks = nano_tick_get();
	limit = cur_ticks + timeout_in_ticks;

	while (cur_ticks < limit) {

		/*
		 * Predict that the branch will be taken to break out of the loop.
		 * There is little cost to a misprediction since that leads to idle.
		 */

		if (likely(lifo->list)) {
			data = lifo->list;
			lifo->list = *(void **)data;
			irq_unlock_inline(key);
			return data;
		}

		/* see explanation in nano_stack.c:nano_task_stack_pop_wait() */

		nano_cpu_atomic_idle(key);

		key = irq_lock_inline();
		cur_ticks = nano_tick_get();
	}

	irq_unlock_inline(key);
	return NULL;
}
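A hedged usage sketch of the timeout variant above: the caller tells success from timeout by the NULL return value. The lifo pointer and the 20-tick budget are hypothetical.

/* Hypothetical usage: wait at most 20 ticks for an item from my_lifo. */
void consume_one(struct nano_lifo *my_lifo)
{
	void *item = nano_task_lifo_get_wait_timeout(my_lifo, 20);

	if (item == NULL) {
		return;	/* timed out: nothing was queued within 20 ticks */
	}

	/* the first word of 'item' held the link; the payload follows it */
}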
Example #5
void task_sleep(int32_t timeout_in_ticks)
{
    int64_t  cur_ticks, limit;
    int  key;

    key = irq_lock();
    cur_ticks = sys_tick_get();
    limit = cur_ticks + timeout_in_ticks;

    while (cur_ticks < limit) {
        _NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout_in_ticks);
        nano_cpu_atomic_idle(key);

        key = irq_lock();
        cur_ticks = sys_tick_get();
    }

    irq_unlock(key);
}
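task_sleep() simply idles until the requested number of ticks has elapsed, re-checking the tick count each time an interrupt wakes the CPU. A hedged usage sketch, assuming the legacy sys_clock_ticks_per_sec variable holds the system tick rate:

/* Hypothetical usage: put the current task to sleep for roughly 100 ms. */
void wait_a_bit(void)
{
	int32_t ticks = (sys_clock_ticks_per_sec * 100) / 1000;

	task_sleep(ticks);
}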
Example #6
void *nano_task_lifo_get(struct nano_lifo *lifo, int32_t timeout_in_ticks)
{
	int64_t cur_ticks;
	int64_t limit = 0x7fffffffffffffffll;
	unsigned int imask;

	imask = irq_lock();
	cur_ticks = _NANO_TIMEOUT_TICK_GET();
	if (timeout_in_ticks != TICKS_UNLIMITED) {
		limit = cur_ticks + timeout_in_ticks;
	}

	do {
		/*
		 * Predict that the branch will be taken to break out of the loop.
		 * There is little cost to a misprediction since that leads to idle.
		 */

		if (likely(lifo->list != NULL)) {
			void *data = lifo->list;

			lifo->list = *(void **) data;
			irq_unlock(imask);

			return data;
		}

		if (timeout_in_ticks != TICKS_NONE) {

			_NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout_in_ticks);

			/* see explanation in nano_stack.c:nano_task_stack_pop() */
			nano_cpu_atomic_idle(imask);

			imask = irq_lock();
			cur_ticks = _NANO_TIMEOUT_TICK_GET();
		}
	} while (cur_ticks < limit);

	irq_unlock(imask);

	return NULL;
}
Example #7
int nano_task_sem_take_wait_timeout(struct nano_sem *sem, int32_t timeout_in_ticks)
{
	int64_t cur_ticks, limit;
	unsigned int key;

	if (unlikely(TICKS_UNLIMITED == timeout_in_ticks)) {
		nano_task_sem_take_wait(sem);
		return 1;
	}

	if (unlikely(TICKS_NONE == timeout_in_ticks)) {
		return nano_task_sem_take(sem);
	}

	key = irq_lock_inline();
	cur_ticks = nano_tick_get();
	limit = cur_ticks + timeout_in_ticks;

	while (cur_ticks < limit) {

		/*
		 * Predict that the branch will be taken to break out of the loop.
		 * There is little cost to a misprediction since that leads to idle.
		 */

		if (likely(sem->nsig > 0)) {
			sem->nsig--;
			irq_unlock_inline(key);
			return 1;
		}

		/* see explanation in nano_stack.c:nano_task_stack_pop_wait() */

		nano_cpu_atomic_idle(key);

		key = irq_lock_inline();
		cur_ticks = nano_tick_get();
	}

	irq_unlock_inline(key);
	return 0;
}
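Unlike the LIFO variant, the semaphore take with a timeout reports its outcome as 1 (a signal was consumed) or 0 (the wait timed out). A hedged usage sketch with a hypothetical semaphore pointer and tick budget:

/* Hypothetical usage: wait up to 50 ticks for a signal on my_sem. */
int wait_for_signal(struct nano_sem *my_sem)
{
	if (nano_task_sem_take_wait_timeout(my_sem, 50)) {
		return 0;	/* signal consumed within the timeout */
	}

	return -1;	/* timed out; no signal was consumed */
}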