/*
 * Dump the general-purpose register state for an oops/panic report.
 * For an AArch32 (compat) task only r0-r12 plus the banked SP/LR are
 * meaningful; a native task gets x0-x29, x30 (LR) and SP.
 */
void __show_regs(struct pt_regs *regs)
{
	u64 sp, lr;
	int top_reg, i;

	if (compat_user_mode(regs)) {
		/* 32-bit task: banked SP/LR, GPRs r0-r12 */
		sp = regs->compat_sp;
		lr = regs->compat_lr;
		top_reg = 12;
	} else {
		sp = regs->sp;
		lr = regs->regs[30];
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", lr);
	printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
	       regs->pc, lr, regs->pstate);
	printk("sp : %016llx\n", sp);

	/* Print the GPRs top-down, breaking the line after each even index. */
	i = top_reg;
	while (i >= 0) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		if (!(i & 1))
			printk("\n");
		i--;
	}
	printk("\n");
}
u64 perf_reg_value(struct pt_regs *regs, int idx) { if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM64_MAX)) return 0; /* * Compat (i.e. 32 bit) mode: * - PC has been set in the pt_regs struct in kernel_entry, * - Handle SP and LR here. */ if (compat_user_mode(regs)) { if ((u32)idx == PERF_REG_ARM64_SP) return regs->compat_sp; if ((u32)idx == PERF_REG_ARM64_LR) return regs->compat_lr; } if ((u32)idx == PERF_REG_ARM64_SP) return regs->sp; if ((u32)idx == PERF_REG_ARM64_PC) return regs->pc; return regs->regs[idx]; }
/*
 * Core oops reporting: print the banner, give die-notifier consumers a
 * chance to claim the event, then dump registers, stack and code.
 * Returns the notifier verdict (NOTIFY_STOP suppresses further output).
 */
static int __die(const char *str, int err, struct thread_info *thread,
		 struct pt_regs *regs)
{
	struct task_struct *tsk = thread->task;
	static int die_counter;	/* counts oopses since boot: "[#N]" */
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	__show_regs(regs);
	/* thread_info sits at the stack base, so "thread + 1" is the limit. */
	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);

	/* Only dump kernel stack/code when we died in kernel context. */
	if (!user_mode(regs) || in_interrupt()) {
		dump_mem(KERN_EMERG, "Stack: ", regs->sp,
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk),
			 compat_user_mode(regs));
		dump_backtrace(regs, tsk);
		dump_instr(KERN_EMERG, regs);
	}

	return ret;
}
void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } perf_callchain_store(entry, regs->pc); if (!compat_user_mode(regs)) { /* AARCH64 mode */ struct frame_tail __user *tail; tail = (struct frame_tail __user *)regs->regs[29]; while (entry->nr < entry->max_stack && tail && !((unsigned long)tail & 0xf)) tail = user_backtrace(tail, entry); } else { #ifdef CONFIG_COMPAT /* AARCH32 compat mode */ struct compat_frame_tail __user *tail; tail = (struct compat_frame_tail __user *)regs->compat_fp - 1; while ((entry->nr < entry->max_stack) && tail && !((unsigned long)tail & 0x3)) tail = compat_user_backtrace(tail, entry); #endif } }
static void print_pstate(struct pt_regs *regs) { u64 pstate = regs->pstate; if (compat_user_mode(regs)) { printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n", pstate, pstate & PSR_AA32_N_BIT ? 'N' : 'n', pstate & PSR_AA32_Z_BIT ? 'Z' : 'z', pstate & PSR_AA32_C_BIT ? 'C' : 'c', pstate & PSR_AA32_V_BIT ? 'V' : 'v', pstate & PSR_AA32_Q_BIT ? 'Q' : 'q', pstate & PSR_AA32_T_BIT ? "T32" : "A32", pstate & PSR_AA32_E_BIT ? "BE" : "LE", pstate & PSR_AA32_A_BIT ? 'A' : 'a', pstate & PSR_AA32_I_BIT ? 'I' : 'i', pstate & PSR_AA32_F_BIT ? 'F' : 'f'); } else { printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO)\n", pstate, pstate & PSR_N_BIT ? 'N' : 'n', pstate & PSR_Z_BIT ? 'Z' : 'z', pstate & PSR_C_BIT ? 'C' : 'c', pstate & PSR_V_BIT ? 'V' : 'v', pstate & PSR_D_BIT ? 'D' : 'd', pstate & PSR_A_BIT ? 'A' : 'a', pstate & PSR_I_BIT ? 'I' : 'i', pstate & PSR_F_BIT ? 'F' : 'f', pstate & PSR_PAN_BIT ? '+' : '-', pstate & PSR_UAO_BIT ? '+' : '-'); } }
/*
 * Dispatch a register dump to the format matching the interrupted
 * context: AArch32 layout for a compat task, AArch64 otherwise.
 */
void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
			    const struct pt_regs *regs)
{
	if (compat_user_mode(regs)) {
		fiq_debugger_dump_regs_aarch32(output, regs);
		return;
	}

	fiq_debugger_dump_regs_aarch64(output, regs);
}
/*
 * Return a short human-readable name for the saved processor mode.
 * Any AArch32 user context is reported as "USR"; AArch64 contexts map
 * to their exception level / stack selection, with "???" as fallback.
 */
static char *mode_name(const struct pt_regs *regs)
{
	if (compat_user_mode(regs))
		return "USR";

	switch (processor_mode(regs)) {
	case PSR_MODE_EL0t:
		return "EL0t";
	case PSR_MODE_EL1t:
		return "EL1t";
	case PSR_MODE_EL1h:
		return "EL1h";
	case PSR_MODE_EL2t:
		return "EL2t";
	case PSR_MODE_EL2h:
		return "EL2h";
	default:
		return "???";
	}
}
/*
 * Dump pstate, pc/lr, sp and the general-purpose registers for an
 * oops/panic report. For an AArch32 (compat) task only r0-r12 plus the
 * banked SP/LR are meaningful; native tasks get x0-x29, x30 and SP.
 * Kernel-mode pc/lr are symbolised (%pS); user-mode values print raw.
 */
void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;	/* AArch32 GPRs are r0-r12 */
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;	/* x30/LR printed separately */
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);

	/* Don't symbolise user addresses — they'd be meaningless/leaky. */
	if (!user_mode(regs)) {
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)lr);
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);

	if (system_uses_irq_prio_masking())
		printk("pmr_save: %08llx\n", regs->pmr_save);

	/*
	 * Print registers top-down, two per line where possible: a fresh
	 * printk starts each line, pr_cont appends the partner register
	 * (index pairs like x29/x28 ... x1/x0) and the newline.
	 */
	i = top_reg;
	while (i >= 0) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		i--;
		if (i % 2 == 0) {
			pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
			i--;
		}
		pr_cont("\n");
	}
}
/*
 * Handle an undefined-instruction exception: give AArch32 breakpoint
 * instructions to the debug layer first, optionally log the fault, then
 * deliver SIGILL/ILL_ILLOPC to the offending task.
 */
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
	siginfo_t info;
	void __user *pc = (void __user *)instruction_pointer(regs);

#ifdef CONFIG_COMPAT
	/* check for AArch32 breakpoint instructions */
	if (compat_user_mode(regs) && aarch32_break_trap(regs) == 0)
		return;
#endif

	if (show_unhandled_signals) {
		pr_info("%s[%d]: undefined instruction: pc=%p\n",
			current->comm, task_pid_nr(current), pc);
		dump_instr(KERN_INFO, regs);
	}

	/*
	 * siginfo_t contains a union plus padding; zero the whole thing
	 * before filling in the fields we use, otherwise uninitialised
	 * kernel stack bytes are copied out to userspace when the signal
	 * is delivered.
	 */
	memset(&info, 0, sizeof(info));
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code  = ILL_ILLOPC;
	info.si_addr  = pc;

	arm64_notify_die("Oops - undefined instruction", regs, &info, 0);
}
/*
 * Read every active event source (PMU, PL310) for the current sample,
 * optionally collect a user-space callchain, and emit one sample record
 * built as a scatter list of up to 5 iovecs:
 *   [0] extra_data flags, [1..2] callchain IPs + per-entry types (if any),
 *   [next] positive counter values, [last] task state word (if non-zero).
 * Runs in sampling context on the local CPU; bails out early if sampling
 * is inactive, the task's namespaces are gone, or no counters fired.
 */
static void read_all_sources(struct pt_regs *regs, struct task_struct *task)
{
	u32 state, extra_data = 0;
	int i, vec_idx = 0, bt_size = 0;
	int nr_events = 0, nr_positive_events = 0;
	struct pt_regs *user_regs;
	struct quadd_iovec vec[5];
	struct hrt_event_value events[QUADD_MAX_COUNTERS];
	u32 events_extra[QUADD_MAX_COUNTERS];
	struct quadd_record_data record_data;
	struct quadd_sample_data *s = &record_data.sample;
	struct quadd_ctx *ctx = hrt.quadd_ctx;
	struct quadd_cpu_context *cpu_ctx = this_cpu_ptr(hrt.cpu_ctx);
	struct quadd_callchain *cc = &cpu_ctx->cc;

	if (!regs)
		return;

	/* Nothing is being profiled on this CPU right now. */
	if (atomic_read(&cpu_ctx->nr_active) == 0)
		return;

	if (!task)
		task = current;

	/* Skip tasks whose namespace proxy is already torn down (exiting). */
	rcu_read_lock();
	if (!task_nsproxy(task)) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	if (ctx->pmu && ctx->pmu_info.active)
		nr_events += read_source(ctx->pmu, regs,
					 events, QUADD_MAX_COUNTERS);

	if (ctx->pl310 && ctx->pl310_info.active)
		nr_events += read_source(ctx->pl310, regs,
					 events + nr_events,
					 QUADD_MAX_COUNTERS - nr_events);

	if (!nr_events)
		return;

	/* Unwind always starts from the task's user-mode register frame. */
	if (user_mode(regs))
		user_regs = regs;
	else
		user_regs = current_pt_regs();

	if (get_sample_data(s, regs, task))
		return;

	/* iovec[0] always carries the extra_data flags word. */
	vec[vec_idx].base = &extra_data;
	vec[vec_idx].len = sizeof(extra_data);
	vec_idx++;

	s->reserved = 0;

	if (ctx->param.backtrace) {
		cc->unw_method = hrt.unw_method;
		bt_size = quadd_get_user_callchain(user_regs, cc, ctx, task);

		/*
		 * Kernel-mode sample with no user callchain: record at
		 * least the interrupted user PC as a kernel-context entry.
		 */
		if (!bt_size && !user_mode(regs)) {
			unsigned long pc = instruction_pointer(user_regs);

			cc->nr = 0;
#ifdef CONFIG_ARM64
			/* 64-bit IP width unless the task is compat. */
			cc->cs_64 = compat_user_mode(user_regs) ? 0 : 1;
#else
			cc->cs_64 = 0;
#endif
			bt_size += quadd_callchain_store(cc, pc,
							 QUADD_UNW_TYPE_KCTX);
		}

		if (bt_size > 0) {
			int ip_size = cc->cs_64 ? sizeof(u64) : sizeof(u32);
			/* unwind-type nibbles are packed 8 per types[] word */
			int nr_types = DIV_ROUND_UP(bt_size, 8);

			vec[vec_idx].base = cc->cs_64 ?
				(void *)cc->ip_64 : (void *)cc->ip_32;
			vec[vec_idx].len = bt_size * ip_size;
			vec_idx++;

			vec[vec_idx].base = cc->types;
			vec[vec_idx].len = nr_types * sizeof(cc->types[0]);
			vec_idx++;

			if (cc->cs_64)
				extra_data |= QUADD_SED_IP64;
		}

		extra_data |= cc->unw_method << QUADD_SED_UNW_METHOD_SHIFT;
		s->reserved |= cc->unw_rc << QUADD_SAMPLE_URC_SHIFT;
	}
	s->callchain_nr = bt_size;

	record_data.record_type = QUADD_RECORD_TYPE_SAMPLE;

	/* Keep only counters that fired; flag their slots in events_flags. */
	s->events_flags = 0;
	for (i = 0; i < nr_events; i++) {
		u32 value = events[i].value;
		if (value > 0) {
			s->events_flags |= 1 << i;
			events_extra[nr_positive_events++] = value;
		}
	}

	if (nr_positive_events == 0)
		return;

	vec[vec_idx].base = events_extra;
	vec[vec_idx].len = nr_positive_events * sizeof(events_extra[0]);
	vec_idx++;

	/* Non-running tasks additionally ship the raw state word. */
	state = task->state;
	if (state) {
		s->state = 1;
		vec[vec_idx].base = &state;
		vec[vec_idx].len = sizeof(state);
		vec_idx++;
	} else {
		s->state = 0;
	}

	quadd_put_sample(&record_data, vec, vec_idx);
}
/*
 * Walk a userspace EABI frame-pointer chain and feed up to @depth return
 * addresses to gator_add_trace(). Handles three ABIs: native arm,
 * aarch64, and aarch32-compat on an arm64 kernel. gcc stores {fp, lr}
 * with fp pointing past the pair (hence gcc_frame_offset); clang's fp
 * points at the pair directly (offset zero).
 * NOTE(review): uses __copy_from_user_inatomic, so it tolerates faulting
 * user stacks by simply truncating the trace.
 */
static void arm_backtrace_eabi(int cpu, struct pt_regs *const regs, unsigned int depth)
{
#if defined(__arm__) || defined(__aarch64__)
	struct stack_frame_eabi *curr;
	struct stack_frame_eabi bufcurr;	/* kernel copy of the user frame */
#if defined(__arm__)
	const bool is_compat = false;
	unsigned long fp = regs->ARM_fp;
	unsigned long sp = regs->ARM_sp;
	unsigned long lr = regs->ARM_lr;
	const int gcc_frame_offset = sizeof(unsigned long);
#else
	// Is userspace aarch32 (32 bit)
	const bool is_compat = compat_user_mode(regs);
	/* compat fp is r11; native fp is x29 */
	unsigned long fp = (is_compat ? regs->regs[11] : regs->regs[29]);
	unsigned long sp = (is_compat ? regs->compat_sp : regs->sp);
	unsigned long lr = (is_compat ? regs->compat_lr : regs->regs[30]);
	const int gcc_frame_offset = (is_compat ? sizeof(u32) : 0);
#endif
	// clang frame offset is always zero
	int is_user_mode = user_mode(regs);

	// pc (current function) has already been added
	if (!is_user_mode) {
		return;
	}

	// Add the lr (parent function)
	// entry preamble may not have executed
	gator_add_trace(cpu, lr);

	// check fp is valid
	if (fp == 0 || fp < sp) {
		return;
	}

	// Get the current stack frame
	curr = (struct stack_frame_eabi *)(fp - gcc_frame_offset);
	if ((unsigned long)curr & 3) {
		return;
	}

	while (depth-- && curr) {
		/* Abort the walk on any unreadable user frame. */
		if (!access_ok(VERIFY_READ, curr, sizeof(struct stack_frame_eabi)) ||
		    __copy_from_user_inatomic(&bufcurr, curr, sizeof(struct stack_frame_eabi))) {
			return;
		}

		/* compat frames store 32-bit fp/lr fields. */
		fp = (is_compat ? bufcurr.fp_32 : bufcurr.fp);
		lr = (is_compat ? bufcurr.lr_32 : bufcurr.lr);

#define calc_next(reg) ((reg) - gcc_frame_offset)
		// Returns true if reg is a valid fp
#define validate_next(reg, curr) \
	((reg) != 0 && (calc_next(reg) & 3) == 0 && (unsigned long)(curr) < calc_next(reg))

		// Try lr from the stack as the fp because gcc leaf functions do not push lr
		// If gcc_frame_offset is non-zero, the lr will also be the clang fp
		// This assumes code is at a lower address than the stack
		if (validate_next(lr, curr)) {
			fp = lr;
			lr = (is_compat ? bufcurr.lr2_32 : bufcurr.lr2);
		}

		gator_add_trace(cpu, lr);

		if (!validate_next(fp, curr)) {
			return;
		}

		// Move to the next stack frame
		curr = (struct stack_frame_eabi *)calc_next(fp);
	}
#endif
}