Code example #1
File: nano_lifo.c    Project: 01org/CODK-A-Firmware
void *nano_task_lifo_get_wait(struct nano_lifo *lifo)
{
	void *data;
	unsigned int imask;

	/* spin until data is put onto the LIFO */

	while (1) {
		imask = irq_lock_inline();

		/*
		 * Predict that the branch will be taken to break out of the loop.
		 * There is little cost to a misprediction since that leads to idle.
		 */

		if (likely(lifo->list))
			break;

		/* see explanation in nano_stack.c:nano_task_stack_pop_wait() */

		nano_cpu_atomic_idle(imask);
	}

	data = lifo->list;
	lifo->list = *(void **) data;

	irq_unlock_inline(imask);

	return data;
}
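
A minimal usage sketch for the blocking get above, assuming the legacy Zephyr nanokernel LIFO API: nano_lifo_init() and nano_task_lifo_put() do not appear in the excerpt, and the convention that the first word of each queued item is reserved for the kernel's link pointer is likewise an assumption here.

struct item {
	void *link;        /* assumed: reserved for the LIFO's internal list pointer */
	uint32_t payload;  /* user data */
};

static struct nano_lifo my_lifo;
static struct item my_item = { .payload = 42 };

void producer(void)
{
	nano_lifo_init(&my_lifo);               /* assumed init routine */
	nano_task_lifo_put(&my_lifo, &my_item); /* assumed task-context put */
}

void consumer(void)
{
	/* Idles until an item is available, then unlinks and returns it. */
	struct item *it = nano_task_lifo_get_wait(&my_lifo);
	(void)it->payload;
}
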
Code example #2
File: nano_sema.c    Project: 01org/CODK-A-Firmware
void nano_task_sem_take_wait(struct nano_sem *sem)
{
	unsigned int imask;

	/* spin until the semaphore is signaled */

	while (1) {
		imask = irq_lock_inline();

		/*
		 * Predict that the branch will be taken to break out of the loop.
		 * There is little cost to a misprediction since that leads to idle.
		 */

		if (likely(sem->nsig > 0))
			break;

		/* see explanation in nano_stack.c:nano_task_stack_pop_wait() */

		nano_cpu_atomic_idle(imask);
	}

	sem->nsig--;
	irq_unlock_inline(imask);
}
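
A short, hypothetical pairing for the blocking take above; nano_sem_init() and nano_isr_sem_give() are assumed from the legacy nanokernel API and are not part of the excerpt.

static struct nano_sem data_ready;

void init(void)
{
	nano_sem_init(&data_ready);     /* assumed: signal count (nsig) starts at 0 */
}

void my_isr(void *arg)
{
	nano_isr_sem_give(&data_ready); /* assumed ISR-context give */
}

void worker(void)
{
	/* Idles until signaled, then decrements nsig under the IRQ lock. */
	nano_task_sem_take_wait(&data_ready);
}
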
Code example #3
File: nano_stack.c    Project: cristhiand3/riscv_vhdl
int nano_task_stack_pop(struct nano_stack *stack, uint32_t *pData, int32_t timeout_in_ticks)
{
	unsigned int imask;

	imask = irq_lock();

	while (1) {
		/*
		 * Predict that the branch will be taken to break out of the
		 * loop.  There is little cost to a misprediction since that
		 * leads to idle.
		 */

		if (likely(stack->next > stack->base)) {
			stack->next--;
			*pData = *(stack->next);
			irq_unlock(imask);
			return 1;
		}

		if (timeout_in_ticks == TICKS_NONE) {
			break;
		}

		/*
		 * Invoke nano_cpu_atomic_idle() with interrupts still disabled
		 * to prevent the scenario where an interrupt fires after
		 * re-enabling interrupts and before executing the "halt"
		 * instruction.  If the ISR performs a nano_isr_stack_push() on
		 * the same stack object, the subsequent execution of the "halt"
		 * instruction will result in the queued data being ignored
		 * until the next interrupt, if any.
		 *
		 * Thus it should be clear that an architecture's implementation
		 * of nano_cpu_atomic_idle() must be able to atomically
		 * re-enable interrupts and enter a low-power mode.
		 *
		 * This explanation is valid for all nanokernel objects: stacks,
		 * FIFOs, LIFOs, and semaphores, for their
		 * nano_task_<object>_<get>() routines.
		 */

		nano_cpu_atomic_idle(imask);
		imask = irq_lock();
	}

	irq_unlock(imask);
	return 0;
}
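
Note that this variant uses timeout_in_ticks only to distinguish polling from blocking: any value other than TICKS_NONE loops until data arrives, with no deadline. A hedged usage sketch follows; my_stack, its initialization, and the TICKS_UNLIMITED sentinel are assumptions rather than part of the excerpt.

static struct nano_stack my_stack;  /* assumed to be initialized elsewhere */

void stack_consumer(void)
{
	uint32_t value;

	/* Non-blocking poll: returns 0 immediately if the stack is empty. */
	if (nano_task_stack_pop(&my_stack, &value, TICKS_NONE)) {
		/* value holds the popped word */
	}

	/* Blocking pop: atomically re-enables interrupts and idles until a push. */
	(void)nano_task_stack_pop(&my_stack, &value, TICKS_UNLIMITED);
}
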
Code example #4
File: nano_lifo.c    Project: 01org/CODK-A-Firmware
void *nano_task_lifo_get_wait_timeout(struct nano_lifo *lifo,
		int32_t timeout_in_ticks)
{
	int64_t cur_ticks, limit;
	unsigned int key;
	void *data;

	if (unlikely(TICKS_UNLIMITED == timeout_in_ticks)) {
		return nano_task_lifo_get_wait(lifo);
	}

	if (unlikely(TICKS_NONE == timeout_in_ticks)) {
		return nano_task_lifo_get(lifo);
	}

	key = irq_lock_inline();
	cur_ticks = nano_tick_get();
	limit = cur_ticks + timeout_in_ticks;

	while (cur_ticks < limit) {

		/*
		 * Predict that the branch will be taken to break out of the loop.
		 * There is little cost to a misprediction since that leads to idle.
		 */

		if (likely(lifo->list)) {
			data = lifo->list;
			lifo->list = *(void **)data;
			irq_unlock_inline(key);
			return data;
		}

		/* see explanation in nano_stack.c:nano_task_stack_pop_wait() */

		nano_cpu_atomic_idle(key);

		key = irq_lock_inline();
		cur_ticks = nano_tick_get();
	}

	irq_unlock_inline(key);
	return NULL;
}
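
The timeout variant reports an expired deadline by returning NULL; a hypothetical caller might handle it as below. The 100-tick deadline is arbitrary and my_lifo is assumed to be initialized elsewhere.

static struct nano_lifo my_lifo;  /* assumed to be initialized elsewhere */

void bounded_consumer(void)
{
	/* Wait at most 100 ticks for an item. */
	void *item = nano_task_lifo_get_wait_timeout(&my_lifo, 100);

	if (item == NULL) {
		/* deadline passed with the LIFO still empty */
	} else {
		/* item points at the element unlinked from the list */
	}
}
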
Code example #5
File: nano_sleep.c    Project: PchZhang/testgit
void task_sleep(int32_t timeout_in_ticks)
{
    int64_t  cur_ticks, limit;
    int  key;

    key = irq_lock();
    cur_ticks = sys_tick_get();
    limit = cur_ticks + timeout_in_ticks;

    while (cur_ticks < limit) {
        _NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout_in_ticks);
        nano_cpu_atomic_idle(key);

        key = irq_lock();
        cur_ticks = sys_tick_get();
    }

    irq_unlock(key);
}
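
task_sleep() takes a duration in ticks, so callers normally convert from wall-clock time first; the sys_clock_ticks_per_sec symbol below is assumed from the legacy kernel headers and is not shown in the excerpt. Note also that a zero or negative argument makes the loop condition false immediately, so the call returns without idling.

void wait_half_second(void)
{
    /* Sleep for roughly 500 ms (assumes sys_clock_ticks_per_sec is defined). */
    task_sleep((int32_t)(sys_clock_ticks_per_sec / 2));
}
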
Code example #6
File: nano_lifo.c    Project: CurieBSP/zephyr
void *nano_task_lifo_get(struct nano_lifo *lifo, int32_t timeout_in_ticks)
{
	int64_t cur_ticks;
	int64_t limit = 0x7fffffffffffffffLL;
	unsigned int imask;

	imask = irq_lock();
	cur_ticks = _NANO_TIMEOUT_TICK_GET();
	if (timeout_in_ticks != TICKS_UNLIMITED) {
		limit = cur_ticks + timeout_in_ticks;
	}

	do {
		/*
		 * Predict that the branch will be taken to break out of the loop.
		 * There is little cost to a misprediction since that leads to idle.
		 */

		if (likely(lifo->list != NULL)) {
			void *data = lifo->list;

			lifo->list = *(void **) data;
			irq_unlock(imask);

			return data;
		}

		if (timeout_in_ticks != TICKS_NONE) {

			_NANO_TIMEOUT_SET_TASK_TIMEOUT(timeout_in_ticks);

			/* see explanation in nano_stack.c:nano_task_stack_pop() */
			nano_cpu_atomic_idle(imask);

			imask = irq_lock();
			cur_ticks = _NANO_TIMEOUT_TICK_GET();
		}
	} while (cur_ticks < limit);

	irq_unlock(imask);

	return NULL;
}
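
This newer variant folds the polling, bounded, and unlimited waits of examples #1 and #4 into a single entry point selected by the timeout argument. A sketch of the three modes follows; it assumes TICKS_NONE and TICKS_UNLIMITED are the usual sentinel values and that my_lifo is initialized elsewhere.

static struct nano_lifo my_lifo;  /* assumed to be initialized elsewhere */

void lifo_modes(void)
{
	void *p;

	p = nano_task_lifo_get(&my_lifo, TICKS_NONE);      /* poll: NULL if empty */
	p = nano_task_lifo_get(&my_lifo, 100);             /* wait at most 100 ticks */
	p = nano_task_lifo_get(&my_lifo, TICKS_UNLIMITED); /* block until data arrives */
	(void)p;
}
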
Code example #7
File: nano_sema.c    Project: 01org/CODK-A-Firmware
int nano_task_sem_take_wait_timeout(struct nano_sem *sem, int32_t timeout_in_ticks)
{
	int64_t cur_ticks, limit;
	unsigned int key;

	if (unlikely(TICKS_UNLIMITED == timeout_in_ticks)) {
		nano_task_sem_take_wait(sem);
		return 1;
	}

	if (unlikely(TICKS_NONE == timeout_in_ticks)) {
		return nano_task_sem_take(sem);
	}

	key = irq_lock_inline();
	cur_ticks = nano_tick_get();
	limit = cur_ticks + timeout_in_ticks;

	while (cur_ticks < limit) {

		/*
		 * Predict that the branch will be taken to break out of the loop.
		 * There is little cost to a misprediction since that leads to idle.
		 */

		if (likely(sem->nsig > 0)) {
			sem->nsig--;
			irq_unlock_inline(key);
			return 1;
		}

		/* see explanation in nano_stack.c:nano_task_stack_pop_wait() */

		nano_cpu_atomic_idle(key);

		key = irq_lock_inline();
		cur_ticks = nano_tick_get();
	}

	irq_unlock_inline(key);
	return 0;
}
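
Unlike the LIFO timeout variant, which signals a timeout with NULL, this routine returns 1 on success and 0 on timeout. A minimal caller (the 50-tick deadline is arbitrary, my_sem assumed initialized elsewhere) might look like this.

static struct nano_sem my_sem;  /* assumed to be initialized elsewhere */

void take_with_deadline(void)
{
	if (nano_task_sem_take_wait_timeout(&my_sem, 50)) {
		/* semaphore obtained within 50 ticks; nsig was decremented */
	} else {
		/* timed out without a signal */
	}
}
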