/**
 *
 * @brief Initializes nanokernel data structures
 *
 * This routine initializes various nanokernel data structures, including
 * the background (or idle) task and any architecture-specific initialization.
 *
 * Note that all fields of "_nanokernel" are set to zero on entry, which may
 * be all the initialization many of them require.
 *
 * @return N/A
 */
static void nano_init(struct tcs *dummyOutContext)
{
	/*
	 * Initialize the current execution thread to permit a level of debugging
	 * output if an exception should happen during nanokernel initialization.
	 * However, don't waste effort initializing the fields of the dummy thread
	 * beyond those needed to identify it as a dummy thread.
	 */
	_nanokernel.current = dummyOutContext;

	/*
	 * Do not insert the dummy execution context in the list of fibers, so
	 * that it does not get scheduled back in once context-switched out.
	 */
	dummyOutContext->link = (struct tcs *)NULL;
	dummyOutContext->flags = FIBER | ESSENTIAL;
	dummyOutContext->prio = 0;

	/*
	 * The interrupt library needs to be initialized early since a series of
	 * handlers are installed into the interrupt table to catch spurious
	 * interrupts. This must be performed before other nanokernel subsystems
	 * install bona fide handlers, or before hardware device drivers are
	 * initialized.
	 */
	_IntLibInit();

	/*
	 * Initialize the thread control block (TCS) for the main task (either
	 * background or idle task). The entry point for this thread is '_main'.
	 */
	_nanokernel.task = (struct tcs *)main_task_stack;
	_new_thread(main_task_stack,            /* pStackMem */
		    CONFIG_MAIN_STACK_SIZE,     /* stackSize */
		    (_thread_entry_t)_main,     /* pEntry */
		    (_thread_arg_t)0,           /* parameter1 */
		    (_thread_arg_t)0,           /* parameter2 */
		    (_thread_arg_t)0,           /* parameter3 */
		    -1,                         /* priority */
		    0                           /* options */
		    );

	/* indicate that failure of this task may be fatal to the entire system */
	_nanokernel.task->flags |= ESSENTIAL;

	initialize_nano_timeouts();

	/* perform any architecture-specific initialization */
	nanoArchInit();
}
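For context, nano_init() runs once during early boot, before any real thread exists. A minimal sketch of such a caller, assuming a _Cstart()-style boot routine and a __tTCS_NOFLOAT_SIZEOF size constant for the dummy TCS (both named here only for illustration):

/* Hypothetical boot-time caller: the dummy TCS lives on the boot stack
 * purely so that _nanokernel.current is valid during initialization.
 */
FUNC_NORETURN void _Cstart(void)
{
	/* dummy TCS storage; floating point is not used during init */
	char __stack dummy_tcs[__tTCS_NOFLOAT_SIZEOF];

	/*
	 * Initialize nanokernel data structures; this also initializes the
	 * interrupt subsystem, so it must run before device drivers do.
	 */
	nano_init((struct tcs *)&dummy_tcs);

	/* ... hardware initialization, then hand-off to the main task ... */
}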
void create_thread(thread_func_t f, void *arg)
{
	/* the compiler emits the prologue here:
	 * __asm enter   -- or: __asm push ebp; mov ebp, esp
	 */
	thread_list *new_thread;

	/* lazily initialize the preemption timer on the first call */
	if (myEvent == 0) {
		initTimer();
	}

	new_thread = _new_thread(f, arg);

	/* bootstrap: wrap the caller itself in a thread record so it can
	 * be scheduled like any other thread
	 */
	if (sys.threads == 0) {
		thread_list *main_thread = _new_thread(0, 0);

		sys.threads = sys.current = main_thread;
	}

	_insert_thread(new_thread, sys.threads->prev);

	sys.current = new_thread;
	start_thread(&new_thread->thread);
	sys.current = sys.threads;

	/* the compiler emits 'leave' here, which restores esp correctly:
	 * __asm leave   -- or: __asm mov esp, ebp; pop ebp
	 */
}
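To make the intended call pattern concrete, here is a minimal usage sketch for this toy API. The worker function, its ids, and the yield() primitive are all hypothetical, and thread_func_t is assumed to be void (*)(void *) as the signature above implies:

#include <stdio.h>

/* hypothetical worker: identifies itself, then keeps yielding */
void worker(void *arg)
{
	int id = *(int *)arg;

	for (;;) {
		printf("thread %d running\n", id);
		yield();	/* assumed cooperative-yield primitive */
	}
}

int main(void)
{
	static int id1 = 1, id2 = 2;

	/* each call allocates a thread record, links it into the list,
	 * and starts the new thread before returning to the caller
	 */
	create_thread(worker, &id1);
	create_thread(worker, &id2);

	for (;;) {
		yield();	/* let the workers run */
	}
}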
k_tid_t k_thread_spawn(char *stack, size_t stack_size,
		       void (*entry)(void *, void *, void *),
		       void *p1, void *p2, void *p3,
		       int prio, u32_t options, s32_t delay)
{
	__ASSERT(!_is_in_isr(), "");

	struct k_thread *new_thread = (struct k_thread *)stack;

	_new_thread(stack, stack_size, entry, p1, p2, p3, prio, options);
	schedule_new_thread(new_thread, delay);

	return new_thread;
}
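A caller-side sketch of this API, assuming the Zephyr 1.x __stack stack-declaration convention and the K_PRIO_PREEMPT()/K_NO_WAIT constants; my_entry, my_stack, and start_worker are hypothetical names:

#define MY_STACK_SIZE 512

static char __noinit __stack my_stack[MY_STACK_SIZE];

/* hypothetical entry point; the three void * parameters are unused here */
static void my_entry(void *p1, void *p2, void *p3)
{
	/* ... thread body ... */
}

void start_worker(void)
{
	/* K_NO_WAIT starts the thread immediately; a positive delay (in ms)
	 * defers the start, and K_FOREVER requires an explicit start later
	 */
	k_tid_t tid = k_thread_spawn(my_stack, MY_STACK_SIZE,
				     my_entry, NULL, NULL, NULL,
				     K_PRIO_PREEMPT(7), 0, K_NO_WAIT);
	ARG_UNUSED(tid);
}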
void _fiber_start(char *pStack,
		  unsigned stackSize, /* stack size in bytes */
		  nano_fiber_entry_t pEntry,
		  int parameter1,
		  int parameter2,
		  unsigned priority,
		  unsigned options)
{
	struct tcs *tcs;
	unsigned int imask;

	tcs = (struct tcs *)pStack;
	_new_thread(pStack,
		    stackSize,
		    (_thread_entry_t)pEntry,
		    (void *)parameter1,
		    (void *)parameter2,
		    (void *)0,
		    priority,
		    options);

	/*
	 * _new_thread() has already set the flags depending on the 'options'
	 * and 'priority' parameters passed to it
	 */

	/* lock interrupts to prevent corruption of the runnable fiber list */
	imask = irq_lock();

	/* make the newly crafted TCS a runnable fiber */
	_nano_fiber_ready(tcs);

	/*
	 * Simply return to the caller if the current thread is FIBER,
	 * otherwise swap into the newly created fiber
	 */
	if ((_nanokernel.current->flags & TASK) == TASK) {
		_Swap(imask);
	} else {
		irq_unlock(imask);
	}
}
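_fiber_start() is the internal worker; fibers are normally launched through a public wrapper. A usage sketch, assuming a fiber_start() wrapper with the same parameter order (blink and its stack are hypothetical):

/* hypothetical fiber entry; nanokernel fibers take two int arguments */
static void blink(int arg1, int arg2)
{
	/* ... fiber body; runs until it blocks, yields, or returns ... */
}

static char __stack blink_stack[512];

void start_blink_fiber(void)
{
	/* priority 5, no options; when called from a task, the new fiber
	 * is swapped in immediately per the TASK check above
	 */
	fiber_start(blink_stack, sizeof(blink_stack), blink, 0, 0, 5, 0);
}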
void *fiber_delayed_start(char *stack, unsigned int stack_size_in_bytes,
			  nano_fiber_entry_t entry_point, int param1,
			  int param2, unsigned int priority,
			  unsigned int options, int32_t timeout_in_ticks)
{
	unsigned int key;
	struct tcs *tcs;

	tcs = (struct tcs *)stack;
	_new_thread(stack, stack_size_in_bytes, (_thread_entry_t)entry_point,
		    (void *)param1, (void *)param2, (void *)0,
		    priority, options);

	key = irq_lock();
	_nano_timeout_add(tcs, NULL, timeout_in_ticks);
	irq_unlock(key);

	return tcs;
}
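The returned handle (the TCS pointer) lets the caller abort the countdown before the fiber becomes runnable. A sketch, assuming a companion fiber_delayed_start_cancel() call; late_entry, late_stack, and schedule_then_cancel are hypothetical:

static void late_entry(int arg1, int arg2)
{
	/* ... fiber body; only runs if the timeout expires ... */
}

static char __stack late_stack[512];

void schedule_then_cancel(void)
{
	/* arm the fiber to become runnable after 100 ticks */
	void *handle = fiber_delayed_start(late_stack, sizeof(late_stack),
					   late_entry, 0, 0, 5, 0, 100);

	/* ... the fiber turns out not to be needed ... */

	/* abort the countdown; the fiber never becomes runnable */
	fiber_delayed_start_cancel(handle);
}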
void _init_static_threads(void)
{
	unsigned int key;

	_FOREACH_STATIC_THREAD(thread_data) {
		_task_group_adjust(thread_data);
		_new_thread(
			thread_data->init_stack,
			thread_data->init_stack_size,
			thread_data->init_entry,
			thread_data->init_p1,
			thread_data->init_p2,
			thread_data->init_p3,
			thread_data->init_prio,
			thread_data->init_options);

		thread_data->thread->init_data = thread_data;
	}

	_sched_lock();

	/* Start all (legacy) threads that are part of the EXE task group */
	_k_thread_group_op(K_TASK_GROUP_EXE, _k_thread_single_start);

	/*
	 * Non-legacy static threads may be started immediately or after a
	 * previously specified delay. Even though the scheduler is locked,
	 * ticks can still be delivered and processed. Lock interrupts so
	 * that all countdowns until execution begin from the same tick.
	 *
	 * Note that static threads defined using the legacy API have a
	 * delay of K_FOREVER.
	 */
	key = irq_lock();
	_FOREACH_STATIC_THREAD(thread_data) {
		if (thread_data->init_delay != K_FOREVER) {
			schedule_new_thread(thread_data->thread,
					    thread_data->init_delay);
		}
	}
	irq_unlock(key);
	k_sched_unlock();
}
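The records walked by _FOREACH_STATIC_THREAD() are normally emitted at build time by K_THREAD_DEFINE(); the macro's final argument becomes the init_delay consumed above. A hypothetical definition (logger, logger_tid, and the numbers are illustrative):

/* starts 500 ms after _init_static_threads() runs; K_NO_WAIT would start
 * it immediately, and K_FOREVER leaves it for a later explicit start
 */
static void logger(void *p1, void *p2, void *p3)
{
	/* ... thread body ... */
}

K_THREAD_DEFINE(logger_tid, 1024, logger, NULL, NULL, NULL,
		K_PRIO_PREEMPT(10), 0, 500);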
/**
 *
 * @brief Initializes kernel data structures
 *
 * This routine initializes various kernel data structures, including
 * the init and idle threads and any architecture-specific initialization.
 *
 * Note that all fields of "_kernel" are set to zero on entry, which may
 * be all the initialization many of them require.
 *
 * @return N/A
 */
static void prepare_multithreading(struct k_thread *dummy_thread)
{
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	ARG_UNUSED(dummy_thread);
#else
	/*
	 * Initialize the current execution thread to permit a level of
	 * debugging output if an exception should happen during kernel
	 * initialization. However, don't waste effort initializing the
	 * fields of the dummy thread beyond those needed to identify it as a
	 * dummy thread.
	 */
	_current = dummy_thread;

	dummy_thread->base.user_options = K_ESSENTIAL;
	dummy_thread->base.thread_state = _THREAD_DUMMY;
#endif

	/* _kernel.ready_q is all zeroes */

	/*
	 * The interrupt library needs to be initialized early since a series
	 * of handlers are installed into the interrupt table to catch
	 * spurious interrupts. This must be performed before other kernel
	 * subsystems install bona fide handlers, or before hardware device
	 * drivers are initialized.
	 */
	_IntLibInit();

	/* ready the init/main and idle threads */
	for (int ii = 0; ii < K_NUM_PRIORITIES; ii++) {
		sys_dlist_init(&_ready_q.q[ii]);
	}

	/*
	 * prime the cache with the main thread since:
	 *
	 * - the cache can never be NULL
	 * - the main thread will be the one to run first
	 * - no other thread is initialized yet and thus their priority fields
	 *   contain garbage, which would prevent the cache loading algorithm
	 *   from working as intended
	 */
	_ready_q.cache = _main_thread;

	_new_thread(_main_thread, _main_stack, MAIN_STACK_SIZE, _main,
		    NULL, NULL, NULL,
		    CONFIG_MAIN_THREAD_PRIORITY, K_ESSENTIAL);
	_mark_thread_as_started(_main_thread);
	_add_thread_to_ready_q(_main_thread);

#ifdef CONFIG_MULTITHREADING
	_new_thread(_idle_thread, _idle_stack, IDLE_STACK_SIZE, idle,
		    NULL, NULL, NULL,
		    K_LOWEST_THREAD_PRIO, K_ESSENTIAL);
	_mark_thread_as_started(_idle_thread);
	_add_thread_to_ready_q(_idle_thread);
#endif

	initialize_timeouts();

	/* perform any architecture-specific initialization */
	kernel_arch_init();
}
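For context, a sketch of the boot path that supplies the dummy thread, assuming a _Cstart()-style entry point and a switch_to_main_thread() hand-off (names used for illustration):

/* Hypothetical boot routine: the dummy thread lives on the boot stack
 * and is abandoned once execution switches to the main thread.
 */
FUNC_NORETURN void _Cstart(void)
{
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	struct k_thread *dummy_thread = NULL;
#else
	struct k_thread dummy_thread_memory;
	struct k_thread *dummy_thread = &dummy_thread_memory;
#endif

	prepare_multithreading(dummy_thread);

	/* context-switch into _main_thread; this call never returns */
	switch_to_main_thread();

	CODE_UNREACHABLE;
}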