/**
 *
 * @brief Create a new kernel execution thread (ARM Cortex-M)
 *
 * Carves an initial exception stack frame from the top of the supplied stack
 * and seeds it so that the first context switch into the thread "returns"
 * into _thread_entry() with the entry routine and its three arguments in
 * r0-r3 (a1-a4).  The thread control structure (struct tcs) lives at the
 * base of the same stack memory.
 *
 * @param pStackMem pointer to the aligned stack memory area
 * @param stackSize stack size in bytes
 * @param pEntry thread entry point routine
 * @param parameter1 first argument passed to the entry routine
 * @param parameter2 second argument passed to the entry routine
 * @param parameter3 third argument passed to the entry routine
 * @param options unused on this architecture
 * @param priority thread priority (-1 for a preemptible task)
 *
 * @return N/A
 */
void _new_thread(char *pStackMem, unsigned stackSize, _thread_entry_t pEntry,
		 void *parameter1, void *parameter2, void *parameter3,
		 int priority, unsigned options)
{
	char *stack_end = pStackMem + stackSize;
	struct tcs *tcs = (struct tcs *)pStackMem;
	struct __esf *esf;

#ifdef CONFIG_INIT_STACKS
	/* fill pattern used by stack-usage instrumentation */
	memset(pStackMem, 0xaa, stackSize);
#endif

	/* carve the thread entry struct from the "base" of the stack */
	esf = (struct __esf *)(STACK_ROUND_DOWN(stack_end) -
			       sizeof(struct __esf));

	/* bit 0 must be clear in the frame PC; the Thumb state lives in
	 * xPSR, not in the address itself
	 */
	esf->pc = ((uint32_t)_thread_entry) & 0xfffffffe;

	/* _thread_entry() consumes these as its four arguments */
	esf->a1 = (uint32_t)pEntry;
	esf->a2 = (uint32_t)parameter1;
	esf->a3 = (uint32_t)parameter2;
	esf->a4 = (uint32_t)parameter3;

	esf->xpsr = 0x01000000UL; /* clear all, thumb bit is 1, even if RO */

	tcs->link = NULL; /* thread not inserted into any list yet */
	tcs->prio = priority;
	if (priority == -1) {
		tcs->flags = TASK | PREEMPTIBLE;
	} else {
		tcs->flags = FIBER;
	}

#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
	tcs->custom_data = NULL;
#endif

#ifdef CONFIG_THREAD_MONITOR
	/*
	 * In debug mode tcs->entry give direct access to the thread entry
	 * and the corresponding parameters.
	 */
	tcs->entry = (struct __thread_entry *)(esf);
#endif

	/* initial PSP points at the carved exception frame */
	tcs->preempReg.psp = (uint32_t)esf;
	tcs->basepri = 0;

	_nano_timeout_tcs_init(tcs);

	/* initial values in all other registers/TCS entries are irrelevant */

	THREAD_MONITOR_INIT(tcs);
}
/** * * @brief Initialize a new execution thread * * This function is utilized to initialize all execution threads (both fiber * and task). The 'priority' parameter will be set to -1 for the creation of * task. * * This function is called by _new_thread() to initialize tasks. * * @param pStackMem pointer to thread stack memory * @param stackSize size of a stack in bytes * @param thread priority * @param options thread options: USE_FP, USE_SSE * * @return N/A */ static void _new_thread_internal(char *pStackMem, unsigned stackSize, int priority, unsigned options) { unsigned long *pInitialCtx; /* ptr to the new task's tcs */ struct tcs *tcs = (struct tcs *)pStackMem; #ifndef CONFIG_FP_SHARING ARG_UNUSED(options); #endif /* !CONFIG_FP_SHARING */ tcs->link = (struct tcs *)NULL; /* thread not inserted into list yet */ tcs->prio = priority; #if (defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)) tcs->excNestCount = 0; #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */ if (priority == -1) tcs->flags = PREEMPTIBLE | TASK; else tcs->flags = FIBER; #ifdef CONFIG_THREAD_CUSTOM_DATA /* Initialize custom data field (value is opaque to kernel) */ tcs->custom_data = NULL; #endif /* * The creation of the initial stack for the task has already been done. * Now all that is needed is to set the ESP. However, we have been passed * the base address of the stack which is past the initial stack frame. * Therefore some of the calculations done in the other routines that * initialize the stack frame need to be repeated. */ pInitialCtx = (unsigned long *)STACK_ROUND_DOWN(pStackMem + stackSize); /* * We subtract 11 here to account for the thread entry routine * parameters * (4 of them), eflags, eip, and the edi/esi/ebx/ebp/eax registers. */ pInitialCtx -= 11; tcs->coopReg.esp = (unsigned long)pInitialCtx; PRINTK("\nInitial context ESP = 0x%x\n", tcs->coopReg.esp); #ifdef CONFIG_FP_SHARING /* * Indicate if the thread is permitted to use floating point instructions. 
* * The first time the new thread is scheduled by _Swap() it is guaranteed * to inherit an FPU that is in a "sane" state (if the most recent user of * the FPU was cooperatively swapped out) or a completely "clean" state * (if the most recent user of the FPU was pre-empted, or if the new thread * is the first user of the FPU). * * The USE_FP flag bit is set in the struct tcs structure if a thread is * authorized to use _any_ non-integer capability, whether it's the basic * x87 FPU/MMX capability, SSE instructions, or a combination of both. The * USE_SSE flag bit is set only if a thread can use SSE instructions. * * Note: Callers need not follow the aforementioned protocol when passing * in thread options. It is legal for the caller to specify _only_ the * USE_SSE option bit if a thread will be utilizing SSE instructions (and * possibly x87 FPU/MMX instructions). */ /* * Implementation Remark: * Until SysGen reserves SSE_GROUP as 0x10, the following conditional is * required so that at least systems configured with FLOAT will still operate * correctly. The issue is that SysGen will utilize group 0x10 user-defined * groups, and thus tasks placed in the user-defined group will have the * SSE_GROUP (but not the FPU_GROUP) bit set. This results in both the USE_FP * and USE_SSE bits being set in the struct tcs. For systems configured only with * FLOAT, the setting of the USE_SSE is harmless, but the setting of USE_FP is * wasteful. Thus to ensure that that systems configured only with FLOAT * behave as expected, the USE_SSE option bit is ignored. * * Clearly, even with the following conditional, systems configured with * SSE will not behave as expected since tasks may still be inadvertantly * have the USE_SSE+USE_FP sets even though they are integer only. 
* * Once the generator tool has been updated to reserve the SSE_GROUP, the * correct code to use is: * * options &= USE_FP | USE_SSE; * */ #ifdef CONFIG_SSE options &= USE_FP | USE_SSE; #else options &= USE_FP; #endif if (options != 0) { tcs->flags |= (options | USE_FP); } #endif /* CONFIG_FP_SHARING */ PRINTK("\nstruct tcs * = 0x%x", tcs); #if defined(CONFIG_THREAD_MONITOR) { unsigned int imask; /* * Add the newly initialized thread to head of the list of threads. * This singly linked list of threads maintains ALL the threads in the * system: both tasks and fibers regardless of whether they are * runnable. */ imask = irq_lock(); tcs->next_thread = _nanokernel.threads; _nanokernel.threads = tcs; irq_unlock(imask); } #endif /* CONFIG_THREAD_MONITOR */ _nano_timeout_tcs_init(tcs); }