void notrace op_mips_backtrace(struct pt_regs *const regs, unsigned int depth)
{
	struct stackframe frame = {
		.sp = regs->regs[29],
		.pc = regs->cp0_epc,
		.ra = regs->regs[31]
	};
	const int userspace = user_mode(regs);
	const unsigned long low_addr = ALIGN(frame.sp, THREAD_SIZE);

	if (userspace)
		do_user_backtrace(low_addr, &frame, depth);
	else
		do_kernel_backtrace(low_addr, &frame, depth);
}
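/*
 * Illustrative sketch, not part of this file: op_mips_backtrace() is meant to
 * be installed as the oprofile backtrace callback, so it is invoked with the
 * sampled register state and the user-configured maximum depth.  The wiring
 * below assumes the usual oprofile_arch_init() hook on MIPS; the body shown
 * here is abbreviated and for illustration only.
 */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	ops->backtrace = op_mips_backtrace;	/* walk stack frames on each sample */
	/* ... per-model counter setup elided ... */
	return 0;
}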
static kern_return_t do_kernel_backtrace(
	thread_t thread,
	struct x86_kernel_state *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx)
{
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

#if __LP64__
	uint64_t currPC = 0ULL;
	uint64_t currFP = 0ULL;
	uint64_t prevPC = 0ULL;
	uint64_t prevFP = 0ULL;

	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) {
		return KERN_FAILURE;
	}
#else
	uint32_t currPC = 0U;
	uint32_t currFP = 0U;
	uint32_t prevPC = 0U;
	uint32_t prevFP = 0U;

	if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
	if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) {
		return KERN_FAILURE;
	}
#endif

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	// no frames traced

	if(!currPC) {
		return KERN_FAILURE;
	}

	frames[ct++] = (uint64_t)currPC;

	// build a backtrace of this kernel state
#if __LP64__
	while(VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) {
		// address of the caller's return address, saved just above the frame pointer
		uint64_t caller = currFP + sizeof(uint64_t);
#else
	while(VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) {
		uint32_t caller = (uint32_t)currFP + sizeof(uint32_t);
#endif

		if(!currFP || !currPC) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read our caller */
		kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC));

		if(kr != KERN_SUCCESS || !currPC) {
			currPC = 0UL;
			break;
		}

		/*
		 * retrieve contents of the frame pointer and advance to the next
		 * stack frame if it's valid
		 */
		prevFP = 0;
		kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC));

#if __LP64__
		if(VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) {
#else
		if(VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) {
#endif
			frames[ct++] = (uint64_t)currPC;
			prevPC = currPC;
		}

		if(prevFP <= currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}

__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
	thread_t		thread,
	uint64_t		*callstack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	kern_return_t kr = KERN_FAILURE;
	task_t task = thread->task;
	uint64_t currPC = 0ULL;
	boolean_t supervisor = FALSE;
	mach_msg_type_number_t bufferIndex = 0;
	mach_msg_type_number_t bufferMaxIndex = *count;
	x86_saved_state_t *tagged_regs = NULL;		// kernel register state
	x86_saved_state64_t *regs64 = NULL;
	x86_saved_state32_t *regs32 = NULL;
	x86_saved_state32_t *u_regs32 = NULL;
	x86_saved_state64_t *u_regs64 = NULL;
	struct x86_kernel_state *kregs = NULL;

	if(ml_at_interrupt_context()) {

		if(user_only) {
			/* can't backtrace user state on interrupt stack. */
			return KERN_FAILURE;
		}

		/* backtracing at interrupt context? */
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			/*
			 * Locate the registers for the interrupted thread, assuming it is
			 * current_thread().
			 */
			tagged_regs = current_cpu_datap()->cpu_int_state;

			if(is_saved_state64(tagged_regs)) {
				/* 64 bit registers */
				regs64 = saved_state64(tagged_regs);
				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				/* 32 bit registers */
				regs32 = saved_state32(tagged_regs);
				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
			}
		}
	}

	if(!ml_at_interrupt_context() && kernel_task == task) {

		if(!thread->kernel_stack) {
			return KERN_FAILURE;
		}

		// Kernel thread not at interrupt context
		kregs = (struct x86_kernel_state *)NULL;

		// nofault read of the thread->kernel_stack pointer
		if(KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) {
			return KERN_FAILURE;
		}

		// Adjust to find the saved kernel state
		kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs);

		supervisor = TRUE;
	} else if(!tagged_regs) {
		/*
		 * not at interrupt context, or tracing a different thread than
		 * current_thread() at interrupt context
		 */
		tagged_regs = USER_STATE(thread);

		if(is_saved_state64(tagged_regs)) {
			/* 64 bit registers */
			regs64 = saved_state64(tagged_regs);
			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
		} else {
			/* 32 bit registers */
			regs32 = saved_state32(tagged_regs);
			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
		}
	}

	*count = 0;

	if(supervisor) {
		// the caller only wants a user callstack.
		if(user_only) {
			// bail - we've only got kernel state
			return KERN_FAILURE;
		}
	} else {
		// regs32(64) is not in supervisor mode.
		u_regs32 = regs32;
		u_regs64 = regs64;
		regs32 = NULL;
		regs64 = NULL;
	}

	if (user_only) {
		/* we only want to backtrace the user mode */
		if(!(u_regs32 || u_regs64)) {
			/* no user state to look at */
			return KERN_FAILURE;
		}
	}

	/*
	 * Order of preference for top of stack:
	 * 64 bit kernel state (not likely)
	 * 32 bit kernel state
	 * 64 bit user land state
	 * 32 bit user land state
	 */
	if(kregs) {
		/*
		 * nofault read of the registers from the kernel stack (as they can
		 * disappear on the fly).
		 */
#if __LP64__
		if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) {
			return KERN_FAILURE;
		}
#else
		uint32_t tmp;
		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_eip), sizeof(uint32_t))) {
			return KERN_FAILURE;
		}
		currPC = (uint64_t)tmp;
#endif
	} else if(regs64) {
		currPC = regs64->isf.rip;
	} else if(regs32) {
		currPC = (uint64_t) regs32->eip;
	} else if(u_regs64) {
		currPC = u_regs64->isf.rip;
	} else if(u_regs32) {
		currPC = (uint64_t) u_regs32->eip;
	}

	if(!currPC) {
		/* no top of the stack, bail out */
		return KERN_FAILURE;
	}

	bufferIndex = 0;

	if(bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	/* backtrace kernel */
	if(kregs) {
		addr64_t address = 0ULL;
		size_t size = 0UL;

		// do the backtrace
		kr = do_kernel_backtrace(thread, kregs, callstack, &bufferIndex, bufferMaxIndex);

		// and do a nofault read of (r|e)sp
#if __LP64__
		uint64_t rsp = 0ULL;
		size = sizeof(uint64_t);

		if(KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) {
			address = 0ULL;
		}
#else
		uint32_t rsp = 0U, tmp = 0U;
		size = sizeof(uint32_t);

		if(KERN_SUCCESS != chudxnu_kern_read(&tmp, (vm_offset_t)&(kregs->k_esp), size)) {
			address = 0ULL;
		} else {
			address = (addr64_t)tmp;
		}
#endif

		if(address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t)rsp;
		}
	} else if(regs64) {
		uint64_t rsp = 0ULL;

		// backtrace the 64bit side.
		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);

		if(KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if(regs32) {
		uint32_t esp = 0U;

		// backtrace the 32bit side.
		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);

		if(KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	} else if(u_regs64) {
		/* backtrace user land */
		uint64_t rsp = 0ULL;

		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}

	} else if(u_regs32) {
		uint32_t esp = 0U;

		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);

		if(KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	}

	*count = bufferIndex;
	return kr;
}
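/*
 * Minimal usage sketch, not part of the original source: calling
 * chudxnu_thread_get_callstack64() with a caller-supplied buffer.  *count is
 * in/out: it carries the buffer capacity in and the number of entries written
 * out.  MAX_FRAMES and sample_thread_callstack() are hypothetical names used
 * only for this example.
 */
#define MAX_FRAMES 128

static void sample_thread_callstack(thread_t thread)
{
	uint64_t frames[MAX_FRAMES];
	mach_msg_type_number_t count = MAX_FRAMES;	/* in: capacity, out: depth */

	/* user_only == FALSE: include kernel frames when the thread is in the kernel */
	if (chudxnu_thread_get_callstack64(thread, frames, &count, FALSE) == KERN_SUCCESS) {
		/* frames[0..count-1] hold return PCs; the last entry is the saved stack pointer */
	}
}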