/* Trampoline context: carries the caller's entry point and argument into the
 * new thread.  Heap-allocated by uv_thread_create() and freed by the started
 * thread once the contents have been copied out; freed by the creator only
 * if pthread_create() fails (the thread never ran). */
struct uv__thread_ctx {
  void (*entry)(void *arg);
  void *arg;
};

/* pthread start routine with the correct void *(*)(void *) signature.
 * Copies the context onto the thread's own stack, releases the heap copy,
 * then invokes the user-supplied entry point. */
static void *uv__thread_start(void *raw) {
  struct uv__thread_ctx ctx;

  ctx = *(struct uv__thread_ctx *) raw;
  free(raw);
  ctx.entry(ctx.arg);
  return NULL;
}

/* Create a thread running entry(arg), honoring the configured stack size.
 *
 * Returns 0 on success, UV_ENOMEM if the trampoline context cannot be
 * allocated, or the UV__ERR()-translated pthread_create() error otherwise.
 *
 * A trampoline is used instead of the previous
 * (void*(*)(void*)) (void(*)(void)) entry cast chain: calling a function
 * through an incompatible function pointer type is undefined behavior in C
 * (C11 6.3.2.3p8) — the cast only suppressed the compiler warning. */
int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  int err;
  size_t stack_size;
  pthread_attr_t* attr;
  pthread_attr_t attr_storage;
  struct uv__thread_ctx *ctx;

  ctx = malloc(sizeof(*ctx));
  if (ctx == NULL)
    return UV_ENOMEM;
  ctx->entry = entry;
  ctx->arg = arg;

  attr = NULL;
  stack_size = thread_stack_size();

  if (stack_size > 0) {
    attr = &attr_storage;

    if (pthread_attr_init(attr))
      abort();

    if (pthread_attr_setstacksize(attr, stack_size))
      abort();
  }

  err = pthread_create(tid, attr, uv__thread_start, ctx);

  if (attr != NULL)
    pthread_attr_destroy(attr);

  if (err)
    free(ctx);  /* Thread never started; reclaim the context here. */

  return UV__ERR(err);
}
/*
 * Kernel or user mode unwind (32-bit execution state).
 *
 * Prints the call stack for the aborted context described by @ai.  Selects
 * the exception-index (.ARM.exidx) table and stack bounds from either the
 * current TA (user-mode abort) or the kernel image (anything else), seeds an
 * unwind state from the saved register frame, and hands it to
 * print_stack_arm32() at TRACE_ERROR level.
 */
static void __print_stack_unwind_arm32(struct abort_info *ai)
{
	struct unwind_state_arm32 state;
	vaddr_t exidx;
	size_t exidx_sz;
	/* Execution mode of the aborted context, extracted from saved SPSR */
	uint32_t mode = ai->regs->spsr & CPSR_MODE_MASK;
	uint32_t sp;
	uint32_t lr;
	vaddr_t stack;
	size_t stack_size;
	bool kernel_stack;

	if (abort_is_user_exception(ai)) {
		/* User-mode abort: unwind using the TA's own exidx table */
		get_current_ta_exidx_stack(&exidx, &exidx_sz, &stack,
					   &stack_size);
		if (!exidx) {
			/* TA has no unwind tables; nothing we can do */
			EMSG_RAW("Call stack not available");
			return;
		}
		kernel_stack = false;
	} else {
		/* Kernel-side abort: use the kernel image's exidx section,
		 * bounded by the __exidx_start/__exidx_end linker symbols */
		exidx = (vaddr_t)__exidx_start;
		exidx_sz = (vaddr_t)__exidx_end - (vaddr_t)__exidx_start;
		/* Kernel stack */
		stack = thread_stack_start();
		stack_size = thread_stack_size();
		kernel_stack = true;
	}

	/*
	 * USR and SYS modes share the same SP/LR bank, which is saved in the
	 * abort frame; every other mode's SP/LR must be read from its banked
	 * registers directly.
	 */
	if (mode == CPSR_MODE_USR || mode == CPSR_MODE_SYS) {
		sp = ai->regs->usr_sp;
		lr = ai->regs->usr_lr;
	} else {
		sp = read_mode_sp(mode);
		lr = read_mode_lr(mode);
	}

	/* Seed the unwinder with the aborted context's register file:
	 * r0-r11 from the saved frame, r13/r14 from the mode-specific
	 * SP/LR chosen above, r15 from the faulting PC.  r12 is left zero
	 * by the memset (not needed for unwinding here, it appears). */
	memset(&state, 0, sizeof(state));
	state.registers[0] = ai->regs->r0;
	state.registers[1] = ai->regs->r1;
	state.registers[2] = ai->regs->r2;
	state.registers[3] = ai->regs->r3;
	state.registers[4] = ai->regs->r4;
	state.registers[5] = ai->regs->r5;
	state.registers[6] = ai->regs->r6;
	state.registers[7] = ai->regs->r7;
	state.registers[8] = ai->regs->r8;
	state.registers[9] = ai->regs->r9;
	state.registers[10] = ai->regs->r10;
	state.registers[11] = ai->regs->r11;
	state.registers[13] = sp;
	state.registers[14] = lr;
	state.registers[15] = ai->pc;

	print_stack_arm32(TRACE_ERROR, &state, exidx, exidx_sz, kernel_stack,
			  stack, stack_size);
}