Example #1
/**
 * @brief Reset the specified task state bits
 *
 * This routine resets the specified task state bits.  When a task's state bits
 * are zero, the task may be scheduled to run.  The task's state bits are a
 * bitmask of the TF_xxx bits.  Each TF_xxx bit indicates a reason why the task
 * must not be scheduled to run.
 *
 * @param X Pointer to task
 * @param bits Bitmask of TF_xxx bits to reset
 * @return N/A
 *
 * @internal
 * When operating on microkernel objects, this routine is invoked in the
 * context of the microkernel server fiber. However, since microkernel tasks
 * may pend/unpend on nanokernel objects, interrupts must be locked to
 * prevent data corruption.
 * @endinternal
 */
void _k_state_bit_reset(struct k_task *X, uint32_t bits)
{
	unsigned int key = irq_lock();
	uint32_t f_old = X->state;      /* old state bits */
	uint32_t f_new = f_old & ~bits; /* new state bits */

	X->state = f_new; /* Update task's state bits */

	if ((f_old != 0) && (f_new == 0)) {
		/*
		 * The task may now be scheduled to run (but could not
		 * previously) as all the TF_xxx bits are clear.  It must
		 * be added to the list of schedulable tasks.
		 */

		struct k_tqhd *H = _k_task_priority_list + X->priority;

		X->next = NULL;
		H->tail->next = X;
		H->tail = X;
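		/*
		 * Mark this priority level as non-empty in the bitmap of
		 * runnable priorities: set bit (priority & 0x1F) of 32-bit
		 * word (priority >> 5).
		 */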
		_k_task_priority_bitmap[X->priority >> 5] |=
			(1 << (X->priority & 0x1F));
	}

	irq_unlock(key);

#ifdef CONFIG_TASK_MONITOR
	f_new ^= f_old;
	if ((_k_monitor_mask & MON_STATE) && (f_new)) {
		/*
		 * Task monitoring is enabled and the new state bits are
		 * different than the old state bits.
		 *
		 * <f_new> now contains the bits that are different.
		 */

		_k_task_monitor(X, f_new | MO_STBIT0);
	}
#endif
}
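Each TF_xxx bit acts as an independent veto on scheduling: the task becomes runnable only once every bit has been cleared, which is why the code above watches for the transition from f_old != 0 to f_new == 0. A minimal standalone sketch of that gating logic, using hypothetical TF_xxx values (the real bit definitions live in the kernel headers):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical state bits; the real TF_xxx values are kernel-defined. */
#define TF_SUSP (1u << 0) /* task is suspended */
#define TF_TIME (1u << 1) /* task is waiting on a timeout */

int main(void)
{
	uint32_t state = TF_SUSP | TF_TIME; /* blocked for two reasons */

	state &= ~TF_TIME; /* timeout expires: one veto cleared */
	printf("runnable: %s\n", state == 0 ? "yes" : "no"); /* no  */

	state &= ~TF_SUSP; /* task resumed: last veto cleared */
	printf("runnable: %s\n", state == 0 ? "yes" : "no"); /* yes */
	return 0;
}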
Example #2
/**
 *
 * @brief The microkernel thread entry point
 *
 * This function implements the microkernel fiber.  It waits for command
 * packets to arrive on its command stack, executes every command on the
 * stack, and then sets up the next task that is ready to run.  It then
 * waits for further input on the command stack.
 *
 * @return Does not return.
 */
FUNC_NORETURN void _k_server(int unused1, int unused2)
{
	struct k_args *pArgs;
	struct k_task *pNextTask;

	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);

	/*
	 * Indicate that failure of this fiber may be fatal to the
	 * entire system.
	 */

	_nanokernel.current->flags |= ESSENTIAL;

	while (1) { /* forever */
		(void) nano_fiber_stack_pop(&_k_command_stack, (uint32_t *)&pArgs,
				TICKS_UNLIMITED); /* will schedule */
		do {
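			/*
			 * Each stack entry is a tagged word: the bits
			 * selected by KERNEL_CMD_TYPE_MASK encode the
			 * command type, and the remaining bits carry either
			 * a packet pointer or an object value.
			 */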
			int cmd_type = (int)pArgs & KERNEL_CMD_TYPE_MASK;

			if (cmd_type == KERNEL_CMD_PACKET_TYPE) {

				/* process command packet */

#ifdef CONFIG_TASK_MONITOR
				if (_k_monitor_mask & MON_KSERV) {
					_k_task_monitor_args(pArgs);
				}
#endif
				(*pArgs->Comm)(pArgs);
			} else if (cmd_type == KERNEL_CMD_EVENT_TYPE) {

				/* give event */

#ifdef CONFIG_TASK_MONITOR
				if (_k_monitor_mask & MON_EVENT) {
					_k_task_monitor_args(pArgs);
				}
#endif
				kevent_t event = (int)pArgs & ~KERNEL_CMD_TYPE_MASK;

				_k_do_event_signal(event);
			} else { /* cmd_type == KERNEL_CMD_SEMAPHORE_TYPE */

				/* give semaphore */

#ifdef CONFIG_TASK_MONITOR
				/* task monitoring for giving semaphore not implemented */
#endif
				ksem_t sem = (int)pArgs & ~KERNEL_CMD_TYPE_MASK;

				_k_sem_struct_value_update(1, (struct _k_sem_struct *)sem);
			}

			/*
			 * check if another fiber (of equal or greater priority)
			 * needs to run
			 */

			if (_nanokernel.fiber) {
				fiber_yield();
			}
		} while (nano_fiber_stack_pop(&_k_command_stack, (uint32_t *)&pArgs,
					TICKS_NONE));

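		/*
		 * The command stack has been drained; select the next
		 * ready task to run.
		 */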
		pNextTask = next_task_select();

		if (_k_current_task != pNextTask) {

			/*
			 * switch from currently selected task to a different
			 * one
			 */

#ifdef CONFIG_WORKLOAD_MONITOR
			if (pNextTask->id == 0x00000000) {
				_k_workload_monitor_idle_start();
			} else if (_k_current_task->id == 0x00000000) {
				_k_workload_monitor_idle_end();
			}
#endif

			_k_current_task = pNextTask;
			_nanokernel.task = (struct tcs *)pNextTask->workspace;

#ifdef CONFIG_TASK_MONITOR
			if (_k_monitor_mask & MON_TSWAP) {
				_k_task_monitor(_k_current_task, 0);
			}
#endif
		}
	}

	/*
	 * Code analyzers may complain that _k_server() uses an infinite loop
	 * unless we indicate that this is intentional
	 */

	CODE_UNREACHABLE;
}
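The entries popped from _k_command_stack are tagged words rather than plain pointers: the command type is encoded in the KERNEL_CMD_TYPE_MASK bits, and the payload (packet pointer, event, or semaphore value) occupies the rest. A minimal standalone sketch of this encode/decode round trip, using hypothetical mask and type values (the real constants are defined by the kernel):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical encoding: the two low-order bits hold the command type. */
#define CMD_TYPE_MASK      0x3u
#define CMD_PACKET_TYPE    0x0u
#define CMD_EVENT_TYPE     0x1u
#define CMD_SEMAPHORE_TYPE 0x2u

int main(void)
{
	uint32_t event = 0x40;                  /* example object value */
	uint32_t word = event | CMD_EVENT_TYPE; /* tag the stack entry */

	/* Decode the same way _k_server() does. */
	uint32_t type  = word & CMD_TYPE_MASK;
	uint32_t value = word & ~CMD_TYPE_MASK;

	printf("type=%" PRIu32 " value=0x%" PRIx32 "\n", type, value);
	/* prints: type=1 value=0x40 */
	return 0;
}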