/*
 * @brief Initialize a new thread from its stack space
 *
 * The thread control structure is put at the lower address of the stack. An
 * initial context, to be "restored" by __return_from_coop(), is put at
 * the other end of the stack, and thus reusable by the stack when not
 * needed anymore.
 *
 * The initial context is a basic stack frame that contains arguments for
 * _thread_entry() return address, that points at _thread_entry()
 * and status register.
 *
 * <options> is currently unused.
 *
 * @param thread pointer to the thread control structure to initialize
 * @param stack the aligned stack memory object
 * @param stackSize the stack size in bytes
 * @param pEntry thread entry point routine
 * @param parameter1 first param to entry point
 * @param parameter2 second param to entry point
 * @param parameter3 third param to entry point
 * @param priority thread priority
 * @param options thread options: K_ESSENTIAL
 *
 * @return N/A
 */
void _new_thread(struct k_thread *thread, k_thread_stack_t stack,
		 size_t stackSize, _thread_entry_t pEntry,
		 void *parameter1, void *parameter2, void *parameter3,
		 int priority, unsigned int options)
{
	char *pStackMem = K_THREAD_STACK_BUFFER(stack);

	_ASSERT_VALID_PRIO(priority, pEntry);

	char *stackEnd = pStackMem + stackSize;
	struct init_stack_frame *pInitCtx;

	_new_thread_init(thread, pStackMem, stackSize, priority, options);

	/* carve the thread entry struct from the "base" of the stack */
	pInitCtx = (struct init_stack_frame *)(STACK_ROUND_DOWN(stackEnd) -
				       sizeof(struct init_stack_frame));

	pInitCtx->pc = ((u32_t)_thread_entry_wrapper);
	pInitCtx->r0 = (u32_t)pEntry;
	pInitCtx->r1 = (u32_t)parameter1;
	pInitCtx->r2 = (u32_t)parameter2;
	pInitCtx->r3 = (u32_t)parameter3;
	/*
	 * For now set the interrupt priority to 15.
	 * We can leave the interrupt enable flag set to 0, as the
	 * seti instruction at the end of _Swap() will enable
	 * interrupts based on the intlock_key value.
	 */
#ifdef CONFIG_ARC_STACK_CHECKING
	pInitCtx->status32 = _ARC_V2_STATUS32_SC |
		_ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
	thread->arch.stack_base = (u32_t) stackEnd;
#else
	pInitCtx->status32 = _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
#endif

#ifdef CONFIG_THREAD_MONITOR
	/*
	 * In debug mode thread->entry gives direct access to the thread entry
	 * and the corresponding parameters.
	 */
	thread->entry = (struct __thread_entry *)(pInitCtx);
#endif

	/*
	 * intlock_key is constructed based on ARCv2 ISA Programmer's
	 * Reference Manual CLRI instruction description:
	 * dst[31:6] dst[5] dst[4]      dst[3:0]
	 * 26'd0     1      STATUS32.IE STATUS32.E[3:0]
	 */
	thread->arch.intlock_key = 0x3F;
	thread->arch.relinquish_cause = _CAUSE_COOP;
	thread->callee_saved.sp =
		(u32_t)pInitCtx - ___callee_saved_stack_t_SIZEOF;

	/* initial values in all other regs/k_thread entries are irrelevant */

	thread_monitor_init(thread);
}
/*
 * Initialize a new thread (legacy nanokernel ARM port).
 *
 * The TCS lives at the low end of the stack memory; an initial exception
 * stack frame is carved from the high end so that the first context switch
 * "returns" into _thread_entry() with the entry routine and its three
 * arguments in a1-a4.
 */
void _new_thread(char *pStackMem, unsigned stackSize, _thread_entry_t pEntry,
		 void *parameter1, void *parameter2, void *parameter3,
		 int priority, unsigned options)
{
	struct tcs *new_tcs = (struct tcs *)pStackMem;
	char *stack_top = pStackMem + stackSize;
	struct __esf *init_frame;

#ifdef CONFIG_INIT_STACKS
	/* fill with a known pattern so stack usage can be measured */
	memset(pStackMem, 0xaa, stackSize);
#endif

	/* carve the initial frame from the "base" (high end) of the stack */
	init_frame = (struct __esf *)(STACK_ROUND_DOWN(stack_top) -
				      sizeof(struct __esf));

	/* bit 0 of the PC is masked off; the xPSR thumb bit supplies it */
	init_frame->pc = ((uint32_t)_thread_entry) & 0xfffffffe;
	init_frame->a1 = (uint32_t)pEntry;
	init_frame->a2 = (uint32_t)parameter1;
	init_frame->a3 = (uint32_t)parameter2;
	init_frame->a4 = (uint32_t)parameter3;
	init_frame->xpsr = 0x01000000UL; /* clear all, thumb bit is 1, even if RO */

	new_tcs->link = NULL;
	if (priority == -1) {
		new_tcs->flags = TASK | PREEMPTIBLE;
	} else {
		new_tcs->flags = FIBER;
	}
	new_tcs->prio = priority;

#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
	new_tcs->custom_data = NULL;
#endif

#ifdef CONFIG_THREAD_MONITOR
	/*
	 * In debug mode tcs->entry gives direct access to the thread entry
	 * and the corresponding parameters (it aliases the initial frame).
	 */
	new_tcs->entry = (struct __thread_entry *)(init_frame);
#endif

	new_tcs->preempReg.psp = (uint32_t)init_frame;
	new_tcs->basepri = 0;

	_nano_timeout_tcs_init(new_tcs);

	/* initial values in all other registers/TCS entries are irrelevant */

	THREAD_MONITOR_INIT(new_tcs);
}
/*
 * Create a new thread (RISC-V port).
 *
 * Builds the initial exception stack frame (ESF) at the top of the thread's
 * stack so that the first context switch into this thread "returns" through
 * _thread_entry_wrapper() with interrupts enabled.
 */
void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		 size_t stack_size, k_thread_entry_t thread_func,
		 void *arg1, void *arg2, void *arg3,
		 int priority, unsigned int options)
{
	char *stack_buf = K_THREAD_STACK_BUFFER(stack);
	struct __esf *esf;

	_ASSERT_VALID_PRIO(priority, thread_func);

	_new_thread_init(thread, stack_buf, stack_size, priority, options);

	/* Initial stack frame for thread */
	esf = (struct __esf *)
		STACK_ROUND_DOWN(stack_buf + stack_size - sizeof(struct __esf));

	/* _thread_entry() arguments */
	esf->a0 = (u32_t)thread_func;
	esf->a1 = (u32_t)arg1;
	esf->a2 = (u32_t)arg2;
	esf->a3 = (u32_t)arg3;

	/*
	 * On RISC-V, MSTATUS (global interrupt enable) and MEPC (program
	 * counter at the point of interrupt/exception) are saved on the stack
	 * upon interrupt/exception entry and restored before returning,
	 * which is what allows nested interrupts.  Context switching is done
	 * via a system call exception in this RISCV32 implementation, so the
	 * frame is seeded with:
	 *  1) MSTATUS = SOC_MSTATUS_DEF_RESTORE, so interrupts are enabled
	 *     when the newly created thread is first scheduled;
	 *  2) MEPC = _thread_entry_wrapper, so the core jumps there when the
	 *     exception return restores this frame.
	 */
	esf->mstatus = SOC_MSTATUS_DEF_RESTORE;
	esf->mepc = (u32_t)_thread_entry_wrapper;

	thread->callee_saved.sp = (u32_t)esf;

	thread_monitor_init(thread);
}
/*
 * @brief Initialize a new thread from its stack space
 *
 * The k_thread object is obtained from _new_thread_init(), which carves it
 * from the thread's stack memory; an initial exception stack frame is
 * carved from the other end so the first context switch "returns" into
 * _thread_entry().
 *
 * @param pStackMem pointer to aligned stack memory
 * @param stackSize stack size in bytes
 * @param pEntry thread entry point routine
 * @param parameter1 first param to entry point
 * @param parameter2 second param to entry point
 * @param parameter3 third param to entry point
 * @param priority thread priority
 * @param options thread options
 *
 * @return N/A
 */
void _new_thread(char *pStackMem, size_t stackSize, _thread_entry_t pEntry,
		 void *parameter1, void *parameter2, void *parameter3,
		 int priority, unsigned int options)
{
	_ASSERT_VALID_PRIO(priority, pEntry);
	__ASSERT(!((u32_t)pStackMem & (STACK_ALIGN - 1)),
		 "stack is not aligned properly\n"
		 "%d-byte alignment required\n", STACK_ALIGN);

	char *stackEnd = pStackMem + stackSize;
	struct __esf *pInitCtx;

	/*
	 * Fix: the previous code first cast pStackMem to a k_thread pointer
	 * and then immediately overwrote that value with the return of
	 * _new_thread_init() -- a dead store.  Initialize directly instead.
	 */
	struct k_thread *thread =
		_new_thread_init(pStackMem, stackSize, priority, options);

	/* carve the thread entry struct from the "base" of the stack */
	pInitCtx = (struct __esf *)(STACK_ROUND_DOWN(stackEnd) -
				    sizeof(struct __esf));

	/* bit 0 of the PC is masked off; the xPSR thumb bit supplies it */
	pInitCtx->pc = ((u32_t)_thread_entry) & 0xfffffffe;
	pInitCtx->a1 = (u32_t)pEntry;
	pInitCtx->a2 = (u32_t)parameter1;
	pInitCtx->a3 = (u32_t)parameter2;
	pInitCtx->a4 = (u32_t)parameter3;
	pInitCtx->xpsr = 0x01000000UL; /* clear all, thumb bit is 1, even if RO */

#ifdef CONFIG_THREAD_MONITOR
	/*
	 * In debug mode thread->entry gives direct access to the thread entry
	 * and the corresponding parameters.
	 */
	thread->entry = (struct __thread_entry *)(pInitCtx);
#endif

	thread->callee_saved.psp = (u32_t)pInitCtx;
	thread->arch.basepri = 0;

	/* swap_return_value can contain garbage */

	/*
	 * initial values in all other registers/thread entries are
	 * irrelevant.
	 */

	thread_monitor_init(thread);
}
/**
 * @brief Create a new kernel execution thread
 *
 * Initializes the k_thread object and sets up initial stack frame.
 *
 * @param thread pointer to thread struct memory, including any space needed
 *               for extra coprocessor context
 * @param stack the pointer to aligned stack memory
 * @param stack_size the stack size in bytes
 * @param thread_func thread entry point routine
 * @param arg1 first param to entry point
 * @param arg2 second param to entry point
 * @param arg3 third param to entry point
 * @param priority thread priority
 * @param options thread options: K_ESSENTIAL, K_FP_REGS, K_SSE_REGS
 *
 * Note that in this arch we cheat quite a bit: we use as stack a normal
 * pthreads stack and therefore we ignore the stack size
 */
void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		 size_t stack_size, k_thread_entry_t thread_func,
		 void *arg1, void *arg2, void *arg3,
		 int priority, unsigned int options)
{
	posix_thread_status_t *status;
	char *stack_buf = K_THREAD_STACK_BUFFER(stack);

	_ASSERT_VALID_PRIO(priority, thread_func);

	_new_thread_init(thread, stack_buf, stack_size, priority, options);

	/* We store it in the same place where normal archs store the
	 * "initial stack frame"
	 */
	status = (posix_thread_status_t *)
		STACK_ROUND_DOWN(stack_buf + stack_size - sizeof(*status));

	/* _thread_entry() arguments */
	status->entry_point = thread_func;
	status->arg1 = arg1;
	status->arg2 = arg2;
	status->arg3 = arg3;

#if defined(CONFIG_ARCH_HAS_THREAD_ABORT)
	status->aborted = 0;
#endif

	thread->callee_saved.thread_status = (u32_t)status;

	posix_new_thread(status);

	thread_monitor_init(thread);
}
/**
 * @brief Check if a memory address range falls within the stack
 *
 * Given a memory address range, ensure that it falls within the bounds
 * of the faulting context's stack.
 *
 * @param addr Starting address
 * @param size Size of the region, or 0 if we just want to see if addr is
 *             in bounds
 * @param cs Code segment of faulting context
 * @return true if addr/size region is not within the thread stack
 */
static bool check_stack_bounds(u32_t addr, size_t size, u16_t cs)
{
	u32_t lo, hi;

	if (_is_in_isr()) {
		/* We were servicing an interrupt */
		lo = (u32_t)_ARCH_THREAD_STACK_BUFFER(_interrupt_stack);
		hi = lo + CONFIG_ISR_STACK_SIZE;
	} else if ((cs & 0x3) != 0 ||
		   (_current->base.user_options & K_USER) == 0) {
		/* Thread was in user mode, or is not a user mode thread.
		 * The normal stack buffer is what we will check.
		 */
		lo = _current->stack_info.start;
		hi = STACK_ROUND_DOWN(_current->stack_info.start +
				      _current->stack_info.size);
	} else {
		/* User thread was doing a syscall, check kernel stack bounds */
		lo = _current->stack_info.start - MMU_PAGE_SIZE;
		hi = _current->stack_info.start;
	}

	/* in bounds means strictly above lo and the whole range within hi */
	bool within = (addr > lo) && (addr + size <= hi);

	return !within;
}
/**
 *
 * @brief Initialize a new execution thread
 *
 * This function is utilized to initialize all execution threads (both fiber
 * and task). The 'priority' parameter will be set to -1 for the creation of
 * task.
 *
 * This function is called by _new_thread() to initialize tasks.
 *
 * @param pStackMem pointer to thread stack memory
 * @param stackSize size of a stack in bytes
 * @param priority thread priority
 * @param options thread options: USE_FP, USE_SSE
 *
 * @return N/A
 */
static void _new_thread_internal(char *pStackMem, unsigned stackSize,
				 int priority, unsigned options)
{
	unsigned long *pInitialCtx;
	/* ptr to the new task's tcs; the TCS is carved from the low end of
	 * the stack memory
	 */
	struct tcs *tcs = (struct tcs *)pStackMem;

#ifndef CONFIG_FP_SHARING
	ARG_UNUSED(options);
#endif /* !CONFIG_FP_SHARING */
	tcs->link = (struct tcs *)NULL; /* thread not inserted into list yet */
	tcs->prio = priority;
#if (defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO))
	tcs->excNestCount = 0;
#endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */

	/* priority -1 denotes a (preemptible) task; anything else a fiber */
	if (priority == -1)
		tcs->flags = PREEMPTIBLE | TASK;
	else
		tcs->flags = FIBER;

#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
	tcs->custom_data = NULL;
#endif

	/*
	 * The creation of the initial stack for the task has already been
	 * done. Now all that is needed is to set the ESP. However, we have
	 * been passed the base address of the stack which is past the initial
	 * stack frame. Therefore some of the calculations done in the other
	 * routines that initialize the stack frame need to be repeated.
	 */
	pInitialCtx = (unsigned long *)STACK_ROUND_DOWN(pStackMem + stackSize);

	/*
	 * We subtract 11 here to account for the thread entry routine
	 * parameters (4 of them), eflags, eip, and the
	 * edi/esi/ebx/ebp/eax registers.
	 */
	pInitialCtx -= 11;

	tcs->coopReg.esp = (unsigned long)pInitialCtx;
	PRINTK("\nInitial context ESP = 0x%x\n", tcs->coopReg.esp);

#ifdef CONFIG_FP_SHARING
	/*
	 * Indicate if the thread is permitted to use floating point
	 * instructions.
	 *
	 * The first time the new thread is scheduled by _Swap() it is
	 * guaranteed to inherit an FPU that is in a "sane" state (if the most
	 * recent user of the FPU was cooperatively swapped out) or a
	 * completely "clean" state (if the most recent user of the FPU was
	 * pre-empted, or if the new thread is the first user of the FPU).
	 *
	 * The USE_FP flag bit is set in the struct tcs structure if a thread
	 * is authorized to use _any_ non-integer capability, whether it's the
	 * basic x87 FPU/MMX capability, SSE instructions, or a combination of
	 * both. The USE_SSE flag bit is set only if a thread can use SSE
	 * instructions.
	 *
	 * Note: Callers need not follow the aforementioned protocol when
	 * passing in thread options. It is legal for the caller to specify
	 * _only_ the USE_SSE option bit if a thread will be utilizing SSE
	 * instructions (and possibly x87 FPU/MMX instructions).
	 */

	/*
	 * Implementation Remark:
	 * Until SysGen reserves SSE_GROUP as 0x10, the following conditional
	 * is required so that at least systems configured with FLOAT will
	 * still operate correctly. The issue is that SysGen will utilize
	 * group 0x10 user-defined groups, and thus tasks placed in the
	 * user-defined group will have the SSE_GROUP (but not the FPU_GROUP)
	 * bit set. This results in both the USE_FP and USE_SSE bits being set
	 * in the struct tcs. For systems configured only with FLOAT, the
	 * setting of the USE_SSE is harmless, but the setting of USE_FP is
	 * wasteful. Thus to ensure that systems configured only with FLOAT
	 * behave as expected, the USE_SSE option bit is ignored.
	 *
	 * Clearly, even with the following conditional, systems configured
	 * with SSE will not behave as expected since tasks may still
	 * inadvertently have the USE_SSE+USE_FP bits set even though they are
	 * integer only.
	 *
	 * Once the generator tool has been updated to reserve the SSE_GROUP,
	 * the correct code to use is:
	 *
	 *    options &= USE_FP | USE_SSE;
	 */
#ifdef CONFIG_SSE
	options &= USE_FP | USE_SSE;
#else
	options &= USE_FP;
#endif

	if (options != 0) {
		tcs->flags |= (options | USE_FP);
	}
#endif /* CONFIG_FP_SHARING */

	PRINTK("\nstruct tcs * = 0x%x", tcs);

#if defined(CONFIG_THREAD_MONITOR)
	{
		unsigned int imask;

		/*
		 * Add the newly initialized thread to head of the list of
		 * threads. This singly linked list of threads maintains ALL
		 * the threads in the system: both tasks and fibers regardless
		 * of whether they are runnable.
		 */
		imask = irq_lock();
		tcs->next_thread = _nanokernel.threads;
		_nanokernel.threads = tcs;
		irq_unlock(imask);
	}
#endif /* CONFIG_THREAD_MONITOR */

	_nano_timeout_tcs_init(tcs);
}
/**
 *
 * @brief Create a new kernel execution thread
 *
 * This function is utilized to create execution threads for both fiber
 * threads and kernel tasks.
 *
 * The "thread control block" (TCS) is carved from the "end" of the specified
 * thread stack memory.
 *
 * @param pStackMem the pointer to aligned stack memory
 * @param stackSize the stack size in bytes
 * @param pEntry thread entry point routine
 * @param parameter1 first param to entry point
 * @param parameter2 second param to entry point
 * @param parameter3 third param to entry point
 * @param priority thread priority
 * @param options thread options: USE_FP, USE_SSE
 *
 * @return opaque pointer to initialized TCS structure
 */
void _new_thread(char *pStackMem, unsigned stackSize, _thread_entry_t pEntry,
		 void *parameter1, void *parameter2, void *parameter3,
		 int priority, unsigned options)
{
	unsigned long *pInitialThread;

#ifdef CONFIG_INIT_STACKS
	/* fill with a known pattern so stack usage can be measured */
	memset(pStackMem, 0xaa, stackSize);
#endif

	/* carve the thread entry struct from the "base" of the stack */
	pInitialThread =
		(unsigned long *)STACK_ROUND_DOWN(pStackMem + stackSize);

	/*
	 * Create an initial context on the stack expected by the _Swap()
	 * primitive.
	 * Given that both tasks and fibers execute at privilege 0, the
	 * setup for both threads is equivalent.
	 */

	/* push arguments required by _thread_entry() */
	*--pInitialThread = (unsigned long)parameter3;
	*--pInitialThread = (unsigned long)parameter2;
	*--pInitialThread = (unsigned long)parameter1;
	*--pInitialThread = (unsigned long)pEntry;

	/* push initial EFLAGS; only modify IF and IOPL bits */
	*--pInitialThread = (EflagsGet() & ~EFLAGS_MASK) | EFLAGS_INITIAL;

#ifdef CONFIG_GDB_INFO
	/*
	 * Arrange for the _thread_entry_wrapper() function to be called
	 * to adjust the stack before _thread_entry() is invoked.
	 */
	*--pInitialThread = (unsigned long)_thread_entry_wrapper;
#else /* CONFIG_GDB_INFO */
	*--pInitialThread = (unsigned long)_thread_entry;
#endif /* CONFIG_GDB_INFO */

	/*
	 * note: stack area for edi, esi, ebx, ebp, and eax registers can be
	 * left uninitialized, since _thread_entry() doesn't care about the
	 * values of these registers when it begins execution
	 */

	/*
	 * For kernel tasks and fibers the thread control struct (TCS) is
	 * located at the "low end" of memory set aside for the thread's
	 * stack.
	 */
	_new_thread_internal(pStackMem, stackSize, priority, options);
}
/*
 * Create a new execution context (pre-"thread" naming of _new_thread).
 * An initial context expected by _Swap() is pushed at the high end of the
 * stack; the context control struct (CCS) lives at the low end.
 */
void _NewContext(
	char *pStackMem,   /* pointer to aligned stack memory */
	unsigned stackSize, /* size of stack in bytes */
	_ContextEntry pEntry, /* context entry point function */
	void *parameter1,  /* first parameter to context entry point function */
	void *parameter2,  /* second parameter to context entry point function */
	void *parameter3,  /* third parameter to context entry point function */
	int priority,      /* context priority */
	unsigned options   /* context options: USE_FP, USE_SSE */
	)
{
	unsigned long *pInitialContext;

#ifdef CONFIG_INIT_STACKS
	/* fill with a known pattern so stack usage can be measured */
	k_memset(pStackMem, 0xaa, stackSize);
#endif

	/* carve the context entry struct from the "base" of the stack */
	pInitialContext =
		(unsigned long *)STACK_ROUND_DOWN(pStackMem + stackSize);

	/*
	 * Create an initial context on the stack expected by the _Swap()
	 * primitive.
	 * Given that both task and fiber contexts execute at privilege 0,
	 * the setup for both contexts is equivalent.
	 */

	/* push arguments required by _context_entry() */
	*--pInitialContext = (unsigned long)parameter3;
	*--pInitialContext = (unsigned long)parameter2;
	*--pInitialContext = (unsigned long)parameter1;
	*--pInitialContext = (unsigned long)pEntry;

	/* push initial EFLAGS; only modify IF and IOPL bits */
	*--pInitialContext = (EflagsGet() & ~EFLAGS_MASK) | EFLAGS_INITIAL;

#ifdef CONFIG_GDB_INFO
	/*
	 * Arrange for the _ContextEntryWrapper() function to be called
	 * to adjust the stack before _context_entry() is invoked.
	 */
	*--pInitialContext = (unsigned long)_ContextEntryWrapper;
#else /* CONFIG_GDB_INFO */
	*--pInitialContext = (unsigned long)_context_entry;
#endif /* CONFIG_GDB_INFO */

	/*
	 * note: stack area for edi, esi, ebx, ebp, and eax registers can be
	 * left uninitialized, since _context_entry() doesn't care about the
	 * values of these registers when it begins execution
	 */

	/*
	 * For kernel tasks and fibers the context control struct (CCS) is
	 * located at the "low end" of memory set aside for the context's
	 * stack.
	 */
	_NewContextInternal(pStackMem, stackSize, priority, options);
}
/*
 * Initialize a new thread (ARM port, unified-kernel tcs variant).
 *
 * The TCS sits at the low end of the stack memory; an initial exception
 * stack frame is carved from the high end so the first context switch
 * "returns" into _thread_entry() with the entry routine and its three
 * arguments in a1-a4.
 */
void _new_thread(char *pStackMem, size_t stackSize, _thread_entry_t pEntry,
		 void *parameter1, void *parameter2, void *parameter3,
		 int priority, unsigned options)
{
	struct tcs *new_tcs = (struct tcs *)pStackMem;
	char *stack_top;
	struct __esf *init_frame;

	_ASSERT_VALID_PRIO(priority, pEntry);
	__ASSERT(!((uint32_t)pStackMem & (STACK_ALIGN - 1)),
		 "stack is not aligned properly\n"
		 "%d-byte alignment required\n", STACK_ALIGN);

	stack_top = pStackMem + stackSize;

#ifdef CONFIG_INIT_STACKS
	/* fill with a known pattern so stack usage can be measured */
	memset(pStackMem, 0xaa, stackSize);
#endif

	/* carve the initial frame from the "base" (high end) of the stack */
	init_frame = (struct __esf *)(STACK_ROUND_DOWN(stack_top) -
				      sizeof(struct __esf));

	/* bit 0 of the PC is masked off; the xPSR thumb bit supplies it */
	init_frame->pc = ((uint32_t)_thread_entry) & 0xfffffffe;
	init_frame->a1 = (uint32_t)pEntry;
	init_frame->a2 = (uint32_t)parameter1;
	init_frame->a3 = (uint32_t)parameter2;
	init_frame->a4 = (uint32_t)parameter3;
	init_frame->xpsr = 0x01000000UL; /* clear all, thumb bit is 1, even if RO */

	_init_thread_base(&new_tcs->base, priority, K_PRESTART, options);

	/* static threads overwrite it afterwards with real value */
	new_tcs->init_data = NULL;
	new_tcs->fn_abort = NULL;

#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
	new_tcs->custom_data = NULL;
#endif

#ifdef CONFIG_THREAD_MONITOR
	/*
	 * In debug mode tcs->entry gives direct access to the thread entry
	 * and the corresponding parameters (it aliases the initial frame).
	 */
	new_tcs->entry = (struct __thread_entry *)(init_frame);
#endif

	new_tcs->callee_saved.psp = (uint32_t)init_frame;
	new_tcs->arch.basepri = 0;

	/* swap_return_value can contain garbage */

	/* initial values in all other registers/TCS entries are irrelevant */

	thread_monitor_init(new_tcs);
}