asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
    siginfo_t info;
    void __user *pc = (void __user *)instruction_pointer(regs);

    /* check for AArch32 breakpoint instructions */
    if (!aarch32_break_handler(regs))
        return;

    if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
        printk_ratelimit()) {
        pr_info("%s[%d]: undefined instruction: pc=%p\n",
            current->comm, task_pid_nr(current), pc);
        dump_instr(KERN_INFO, regs);
    }

    info.si_signo = SIGILL;
    info.si_errno = 0;
    info.si_code = ILL_ILLOPC;
    info.si_addr = pc;

    arm64_notify_die("Oops - undefined instruction", regs, &info, 0);
}
/*
 * Raise a SIGFPE for the current process.
 */
void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
    siginfo_t info;
    unsigned int si_code = 0;

    if (esr & FPEXC_IOF)
        si_code = FPE_FLTINV;
    else if (esr & FPEXC_DZF)
        si_code = FPE_FLTDIV;
    else if (esr & FPEXC_OFF)
        si_code = FPE_FLTOVF;
    else if (esr & FPEXC_UFF)
        si_code = FPE_FLTUND;
    else if (esr & FPEXC_IXF)
        si_code = FPE_FLTRES;

    memset(&info, 0, sizeof(info));
    info.si_signo = SIGFPE;
    info.si_code = si_code;
    info.si_addr = (void __user *)instruction_pointer(regs);

    send_sig_info(SIGFPE, &info, current);
}
void show_regs (struct pt_regs *regs)
{
    unsigned long flags;
    const char *processor_modes[] = {
        "USER_26", "FIQ_26", "IRQ_26", "SVC_26",
        "UK4_26", "UK5_26", "UK6_26", "UK7_26",
        "UK8_26", "UK9_26", "UK10_26", "UK11_26",
        "UK12_26", "UK13_26", "UK14_26", "UK15_26",
        "USER_32", "FIQ_32", "IRQ_32", "SVC_32",
        "UK4_32", "UK5_32", "UK6_32", "ABT_32",
        "UK8_32", "UK9_32", "UK10_32", "UND_32",
        "UK12_32", "UK13_32", "UK14_32", "SYS_32",
    };

    flags = condition_codes (regs);

    printf ("pc : [<%08lx>] lr : [<%08lx>]\n"
        "sp : %08lx ip : %08lx fp : %08lx\n",
        instruction_pointer (regs),
        regs->ARM_lr, regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
    printf ("r10: %08lx r9 : %08lx r8 : %08lx\n",
        regs->ARM_r10, regs->ARM_r9, regs->ARM_r8);
    printf ("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
        regs->ARM_r7, regs->ARM_r6, regs->ARM_r5, regs->ARM_r4);
    printf ("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
        regs->ARM_r3, regs->ARM_r2, regs->ARM_r1, regs->ARM_r0);
    printf ("Flags: %c%c%c%c",
        flags & CC_N_BIT ? 'N' : 'n',
        flags & CC_Z_BIT ? 'Z' : 'z',
        flags & CC_C_BIT ? 'C' : 'c',
        flags & CC_V_BIT ? 'V' : 'v');
    printf (" IRQs %s FIQs %s Mode %s%s\n",
        interrupts_enabled (regs) ? "on" : "off",
        fast_interrupts_enabled (regs) ? "on" : "off",
        processor_modes[processor_mode (regs)],
        thumb_mode (regs) ? " (T)" : "");
}
/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
    struct thread_info *thread = current_thread_info();
    int ret;

#ifdef CONFIG_HUAWEI_PRINTK_CTRL
    printk_level_setup(LOGLEVEL_DEBUG);
#endif

    oops_enter();

    raw_spin_lock_irq(&die_lock);
    console_verbose();
    bust_spinlocks(1);
#ifdef CONFIG_HISI_BB
    set_exception_info(instruction_pointer(regs));
#endif
    ret = __die(str, err, thread, regs);

    if (regs && kexec_should_crash(thread->task))
        crash_kexec(regs);

    bust_spinlocks(0);
    add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
    raw_spin_unlock_irq(&die_lock);
    oops_exit();

    if (in_interrupt())
        panic("Fatal exception in interrupt");
    if (panic_on_oops)
        panic("Fatal exception");
    if (ret != NOTIFY_STOP)
        do_exit(SIGSEGV);

#ifdef CONFIG_HUAWEI_PRINTK_CTRL
    printk_level_setup(sysctl_printk_level);
#endif
}
static int ks8695_pci_fault(unsigned long addr, unsigned int fsr,
                struct pt_regs *regs)
{
    unsigned long pc = instruction_pointer(regs);
    unsigned long instr = *(unsigned long *)pc;
    unsigned long cmdstat;

    cmdstat = __raw_readl(KS8695_PCI_VA + KS8695_CRCFCS);

    printk(KERN_ERR "PCI abort: address = 0x%08lx fsr = 0x%03x PC = 0x%08lx LR = 0x%08lx [%s%s%s%s%s]\n",
        addr, fsr, regs->ARM_pc, regs->ARM_lr,
        cmdstat & (PCI_STATUS_SIG_TARGET_ABORT << 16) ? "GenTarget" : " ",
        cmdstat & (PCI_STATUS_REC_TARGET_ABORT << 16) ? "RecvTarget" : " ",
        cmdstat & (PCI_STATUS_REC_MASTER_ABORT << 16) ? "MasterAbort" : " ",
        cmdstat & (PCI_STATUS_SIG_SYSTEM_ERROR << 16) ? "SysError" : " ",
        cmdstat & (PCI_STATUS_DETECTED_PARITY << 16) ? "Parity" : " "
    );

    __raw_writel(cmdstat, KS8695_PCI_VA + KS8695_CRCFCS);

    /*
     * If the instruction being executed was a read,
     * make it look like it read all-ones.
     */
    if ((instr & 0x0c100000) == 0x04100000) {
        int reg = (instr >> 12) & 15;
        unsigned long val;

        if (instr & 0x00400000)
            val = 255;
        else
            val = -1;

        regs->uregs[reg] = val;
        regs->ARM_pc += 4;
        return 0;
    }
void __show_regs(struct pt_regs *regs)
{
    int i, top_reg;
    u64 lr, sp;

    if (compat_user_mode(regs)) {
        lr = regs->compat_lr;
        sp = regs->compat_sp;
        top_reg = 12;
    } else {
        lr = regs->regs[30];
        sp = regs->sp;
        top_reg = 29;
    }

    show_regs_print_info(KERN_DEFAULT);
    print_symbol("PC is at %s\n", instruction_pointer(regs));
    print_symbol("LR is at %s\n", lr);
    printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
           regs->pc, lr, regs->pstate);
    printk("sp : %016llx\n", sp);

    i = top_reg;

    while (i >= 0) {
        printk("x%-2d: %016llx ", i, regs->regs[i]);
        i--;

        if (i % 2 == 0) {
            pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
            i--;
        }

        pr_cont("\n");
    }
}
static int check_and_rewind_pc(char *put_str, char *arg)
{
    unsigned long addr = lookup_addr(arg);
    unsigned long ip;
    int offset = 0;

    kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, NUMREGBYTES);
    gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
    ip = instruction_pointer(&kgdbts_regs);
    v2printk("Stopped at IP: %lx\n", ip);
#ifdef GDB_ADJUSTS_BREAK_OFFSET
    /* On some arches, a breakpoint stop requires it to be decremented */
    if (addr + BREAK_INSTR_SIZE == ip)
        offset = -BREAK_INSTR_SIZE;
#endif

    if (arch_needs_sstep_emulation && sstep_addr &&
        ip + offset == sstep_addr &&
        ((!strcmp(arg, "sys_open") || !strcmp(arg, "do_fork")))) {
        /* This is special case for emulated single step */
        v2printk("Emul: rewind hit single step bp\n");
        restart_from_top_after_write = 1;
    } else if (strcmp(arg, "silent") && ip + offset != addr) {
        eprintk("kgdbts: BP mismatch %lx expected %lx\n",
            ip + offset, addr);
        return 1;
    }

    /* Readjust the instruction pointer if needed */
    ip += offset;
    cont_addr = ip;
#ifdef GDB_ADJUSTS_BREAK_OFFSET
    instruction_pointer_set(&kgdbts_regs, ip);
#endif
    return 0;
}
/*
 * Oops. The kernel tried to access some page that wasn't present.
 */
static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
                  int error_code, struct pt_regs *regs)
{
    unsigned long fixup;

    /*
     * Are we prepared to handle this kernel fault?
     */
    if ((fixup = search_exception_table(instruction_pointer(regs))) != 0) {
#ifdef DEBUG
        printk(KERN_DEBUG "%s: Exception at [<%lx>] addr=%lx (fixup: %lx)\n",
            current->comm, regs->ARM_pc, addr, fixup);
#endif
        regs->ARM_pc = fixup;
        return;
    }

    /*
     * No handler, we'll have to terminate things with extreme prejudice.
     */
    printk(KERN_ALERT "Unable to handle kernel %s at virtual address %08lx\n",
        (addr < PAGE_SIZE) ? "NULL pointer dereference" : "paging request",
        addr);
    show_pte(mm, addr);
#ifdef CONFIG_KGDB
    if (kgdb_active()) {
        do_kgdb(regs, SIGSEGV);
    }
#endif
    die("Oops", regs, error_code);
    do_exit(SIGKILL);
}
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
    siginfo_t info;
    void __user *pc = (void __user *)instruction_pointer(regs);

#ifdef CONFIG_COMPAT
    /* check for AArch32 breakpoint instructions */
    if (compat_user_mode(regs) && aarch32_break_trap(regs) == 0)
        return;
#endif

    if (show_unhandled_signals) {
        pr_info("%s[%d]: undefined instruction: pc=%p\n",
            current->comm, task_pid_nr(current), pc);
        dump_instr(KERN_INFO, regs);
    }

    info.si_signo = SIGILL;
    info.si_errno = 0;
    info.si_code = ILL_ILLOPC;
    info.si_addr = pc;

    arm64_notify_die("Oops - undefined instruction", regs, &info, 0);
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
    return is_in_guest(regs) ? instruction_pointer_guest(regs)
                 : instruction_pointer(regs);
}
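For comparison, several mainline ports implement the same hook through the registered guest callbacks rather than per-regs helpers. A minimal sketch of that variant, assuming the perf_guest_cbs interface is available:

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
    /* Report the guest PC when the sample interrupted a virtual machine. */
    if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
        return perf_guest_cbs->get_guest_ip();

    return instruction_pointer(regs);
}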
static int do_page_fault(unsigned long addr, int mode, struct pt_regs *regs)
{
    struct task_struct *tsk;
    struct mm_struct *mm;
    unsigned long fixup;
    int fault;

    tsk = current;
    mm = tsk->mm;

    /*
     * We fault-in kernel-space virtual memory on-demand. The
     * 'reference' page table is init_mm.pgd.
     *
     * NOTE! We MUST NOT take any locks for this case. We may
     * be in an interrupt or a critical region, and should
     * only copy the information from the master page table,
     * nothing more.
     */
    if (addr >= TASK_SIZE)
        goto vmalloc_fault;

    /*
     * If we're in an interrupt or have no user
     * context, we must not take the fault..
     */
    if (in_interrupt() || !mm)
        goto no_context;

    down(&mm->mmap_sem);
    fault = __do_page_fault(mm, addr, mode, tsk);
    up(&mm->mmap_sem);

ret:
    /*
     * Handle the "normal" case first
     */
    if (fault > 0)
        return 0;

    /*
     * We had some memory, but were unable to
     * successfully fix up this page fault.
     */
    if (fault == 0)
        goto do_sigbus;

    /*
     * If we are in kernel mode at this point, we
     * have no context to handle this fault with.
     */
    if (!user_mode(regs))
        goto no_context;

    if (fault == -3) {
        /*
         * We ran out of memory, or some other thing happened to
         * us that made us unable to handle the page fault gracefully.
         */
        printk("VM: killing process %s\n", tsk->comm);
        do_exit(SIGKILL);
    } else {
        /*
         * Something tried to access memory that isn't in our memory map..
         * User mode accesses just cause a SIGSEGV
         */
        struct siginfo si;

#ifdef CONFIG_DEBUG_USER
        printk(KERN_DEBUG "%s: unhandled page fault at pc=0x%08lx, "
               "lr=0x%08lx (bad address=0x%08lx, code %d)\n",
               tsk->comm, regs->ARM_pc, regs->ARM_lr, addr, mode);
#endif

        tsk->thread.address = addr;
        tsk->thread.error_code = mode;
        tsk->thread.trap_no = 14;
        si.si_signo = SIGSEGV;
        si.si_errno = 0;
        si.si_code = fault == -1 ? SEGV_ACCERR : SEGV_MAPERR;
        si.si_addr = (void *)addr;
        force_sig_info(SIGSEGV, &si, tsk);
    }
    return 0;

    /*
     * We ran out of memory, or some other thing happened to us that made
     * us unable to handle the page fault gracefully.
     */
do_sigbus:
    /*
     * Send a sigbus, regardless of whether we were in kernel
     * or user mode.
     */
    tsk->thread.address = addr;
    tsk->thread.error_code = mode;
    tsk->thread.trap_no = 14;
    force_sig(SIGBUS, tsk);

    /* Kernel mode? Handle exceptions or die */
    if (user_mode(regs))
        return 0;

no_context:
    /* Are we prepared to handle this kernel fault? */
    if ((fixup = search_exception_table(instruction_pointer(regs))) != 0) {
#ifdef DEBUG
        printk(KERN_DEBUG "%s: Exception at [<%lx>] addr=%lx (fixup: %lx)\n",
               tsk->comm, regs->ARM_pc, addr, fixup);
#endif
        regs->ARM_pc = fixup;
        return 0;
    }

    /*
     * Oops. The kernel tried to access some bad page. We'll have to
     * terminate things with extreme prejudice.
     */
    printk(KERN_ALERT "Unable to handle kernel %s at virtual address %08lx\n",
           (addr < PAGE_SIZE) ? "NULL pointer dereference" : "paging request",
           addr);
    show_pte(mm, addr);
    die("Oops", regs, mode);
    do_exit(SIGKILL);
    return 0;

vmalloc_fault:
    fault = __do_vmalloc_fault(addr, mm);
    goto ret;
}
int kdb_stub(struct kgdb_state *ks)
{
    int error = 0;
    kdb_bp_t *bp;
    unsigned long addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
    kdb_reason_t reason = KDB_REASON_OOPS;
    kdb_dbtrap_t db_result = KDB_DB_NOBPT;
    int i;

    if (KDB_STATE(REENTRY)) {
        reason = KDB_REASON_SWITCH;
        KDB_STATE_CLEAR(REENTRY);
        addr = instruction_pointer(ks->linux_regs);
    }
    ks->pass_exception = 0;
    if (atomic_read(&kgdb_setting_breakpoint))
        reason = KDB_REASON_KEYBOARD;

    for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
        if ((bp->bp_enabled) && (bp->bp_addr == addr)) {
            reason = KDB_REASON_BREAK;
            db_result = KDB_DB_BPT;
            if (addr != instruction_pointer(ks->linux_regs))
                kgdb_arch_set_pc(ks->linux_regs, addr);
            break;
        }
    }
    if (reason == KDB_REASON_BREAK || reason == KDB_REASON_SWITCH) {
        for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
            if (bp->bp_free)
                continue;
            if (bp->bp_addr == addr) {
                bp->bp_delay = 1;
                bp->bp_delayed = 1;
                /*
                 * SSBPT is set when the kernel debugger must single step a
                 * task in order to re-establish an instruction breakpoint
                 * which uses the instruction replacement mechanism. It is
                 * cleared by any action that removes the need to single-step
                 * the breakpoint.
                 */
                reason = KDB_REASON_BREAK;
                db_result = KDB_DB_BPT;
                KDB_STATE_SET(SSBPT);
                break;
            }
        }
    }
    if (reason != KDB_REASON_BREAK && ks->ex_vector == 0 &&
        ks->signo == SIGTRAP) {
        reason = KDB_REASON_SSTEP;
        db_result = KDB_DB_BPT;
    }
    /* Set initial kdb state variables */
    KDB_STATE_CLEAR(KGDB_TRANS);
    kdb_initial_cpu = ks->cpu;
    kdb_current_task = kgdb_info[ks->cpu].task;
    kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
    /* Remove any breakpoints as needed by kdb and clear single step */
    kdb_bp_remove();
    KDB_STATE_CLEAR(DOING_SS);
    KDB_STATE_CLEAR(DOING_SSB);
    KDB_STATE_SET(PAGER);
    /* zero out any offline cpu data */
    for_each_present_cpu(i) {
        if (!cpu_online(i)) {
            kgdb_info[i].debuggerinfo = NULL;
            kgdb_info[i].task = NULL;
        }
    }
    if (ks->err_code == DIE_OOPS || reason == KDB_REASON_OOPS) {
        ks->pass_exception = 1;
        KDB_FLAG_SET(CATASTROPHIC);
    }
    kdb_initial_cpu = ks->cpu;
    if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
        KDB_STATE_CLEAR(SSBPT);
        KDB_STATE_CLEAR(DOING_SS);
    } else {
        /* Start kdb main loop */
        error = kdb_main_loop(KDB_REASON_ENTER, reason,
                      ks->err_code, db_result, ks->linux_regs);
    }
    /*
     * Upon exit from the kdb main loop setup break points and restart
     * the system based on the requested continue state
     */
    kdb_initial_cpu = -1;
    kdb_current_task = NULL;
    kdb_current_regs = NULL;
    KDB_STATE_CLEAR(PAGER);
    kdbnearsym_cleanup();
    if (error == KDB_CMD_KGDB) {
        if (KDB_STATE(DOING_KGDB) || KDB_STATE(DOING_KGDB2)) {
            /*
             * This is the interface glue which allows kdb to transition
             * into the gdb stub. In order to do this the '?' or '' gdb
             * serial packet response is processed here. And then control
             * is passed to the gdbstub.
             */
            if (KDB_STATE(DOING_KGDB))
                gdbstub_state(ks, "?");
            else
                gdbstub_state(ks, "");
            KDB_STATE_CLEAR(DOING_KGDB);
            KDB_STATE_CLEAR(DOING_KGDB2);
        }
        return DBG_PASS_EVENT;
    }
    kdb_bp_install(ks->linux_regs);
    dbg_activate_sw_breakpoints();
    /* Set the exit state to a single step or a continue */
    if (KDB_STATE(DOING_SS))
        gdbstub_state(ks, "s");
    else
        gdbstub_state(ks, "c");
    KDB_FLAG_CLEAR(CATASTROPHIC);

    /* Invoke arch specific exception handling prior to system resume */
    kgdb_info[ks->cpu].ret_state = gdbstub_state(ks, "e");
    if (ks->pass_exception)
        kgdb_info[ks->cpu].ret_state = 1;
    if (error == KDB_CMD_CPU) {
        KDB_STATE_SET(REENTRY);
        /*
         * Force clear the single step bit because kdb emulates this
         * differently vs the gdbstub
         */
        kgdb_single_step = 0;
        dbg_deactivate_sw_breakpoints();
        return DBG_SWITCH_CPU_EVENT;
    }
    return kgdb_info[ks->cpu].ret_state;
}
/*
 * LDM/STM alignment handler.
 *
 * There are 4 variants of this instruction:
 *
 * B = rn pointer before instruction, A = rn pointer after instruction
 *
 *          ------ increasing address ----->
 *          |    | r0 | r1 | ... | rx |    |
 * PU = 01             B                  A
 * PU = 11        B                  A
 * PU = 00   A                  B
 * PU = 10     A                  B
 */
static int
do_alignment_ldmstm(unsigned long addr, unsigned long instr,
            struct pt_regs *regs)
{
    unsigned int rd, rn, correction, nr_regs, regbits;
    unsigned long eaddr, newaddr;

    if (LDM_S_BIT(instr))
        goto bad;

    correction = 4; /* processor implementation defined */
    regs->ARM_pc += correction;

    ai_multi += 1;

    /* count the number of registers in the mask to be transferred */
    nr_regs = hweight16(REGMASK_BITS(instr)) * 4;

    rn = RN_BITS(instr);
    newaddr = eaddr = regs->uregs[rn];

    if (!LDST_U_BIT(instr))
        nr_regs = -nr_regs;
    newaddr += nr_regs;
    if (!LDST_U_BIT(instr))
        eaddr = newaddr;

    if (LDST_P_EQ_U(instr)) /* U = P */
        eaddr += 4;

    /*
     * For alignment faults on the ARM922T/ARM920T the MMU makes
     * the FSR (and hence addr) equal to the updated base address
     * of the multiple access rather than the restored value.
     * Switch this message off if we've got a ARM92[02], otherwise
     * [ls]dm alignment faults are noisy!
     */
#if !(defined CONFIG_CPU_ARM922T) && !(defined CONFIG_CPU_ARM920T)
    /*
     * This is a "hint" - we already have eaddr worked out by the
     * processor for us.
     */
    if (addr != eaddr) {
        printk(KERN_ERR "LDMSTM: PC = %08lx, instr = %08lx, "
            "addr = %08lx, eaddr = %08lx\n",
            instruction_pointer(regs), instr, addr, eaddr);
        show_regs(regs);
    }
#endif

    if (user_mode(regs)) {
        for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
             regbits >>= 1, rd += 1)
            if (regbits & 1) {
                if (LDST_L_BIT(instr)) {
                    unsigned int val;
                    get32t_unaligned_check(val, eaddr);
                    regs->uregs[rd] = val;
                } else
                    put32t_unaligned_check(regs->uregs[rd], eaddr);
                eaddr += 4;
            }
    } else {
        for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 *
 * This overrides the weak version in kernel/events/uprobes.c.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
    return instruction_pointer(regs);
}
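For contrast, on architectures where the trap leaves the PC pointing past the breakpoint instruction, the override has to rewind. A sketch of that case (x86-style, assuming UPROBE_SWBP_INSN_SIZE holds the breakpoint instruction length):

unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
    /* The breakpoint has already executed, so step back over it. */
    return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}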
/*
 * Get user stack entries up to the pcstack_limit; return the number of entries
 * acquired. If pcstack is NULL, return the number of entries potentially
 * acquirable.
 */
unsigned long dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack,
                 int pcstack_limit)
{
    struct task_struct *p = current;
    struct mm_struct *mm = p->mm;
    unsigned long tos, bos, fpc;
    unsigned long *sp;
    unsigned long depth = 0;
    struct vm_area_struct *stack_vma;
    struct page *stack_page = NULL;
    struct pt_regs *regs = current_pt_regs();

    if (pcstack) {
        if (unlikely(pcstack_limit < 2)) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
            return 0;
        }

        *pcstack++ = (uint64_t)p->pid;
        *pcstack++ = (uint64_t)p->tgid;
        pcstack_limit -= 2;
    }

    if (!user_mode(regs))
        goto out;

    /*
     * There is always at least one address to report: the instruction
     * pointer itself (frame 0).
     */
    depth++;

    fpc = instruction_pointer(regs);
    if (pcstack) {
        *pcstack++ = (uint64_t)fpc;
        pcstack_limit--;
    }

    /*
     * We cannot ustack() if this task has no mm, if this task is a kernel
     * thread, or when someone else has the mmap_sem or the page_table_lock
     * (because find_user_vma() ultimately does a __get_user_pages() and
     * thence a follow_page(), which can take that lock).
     */
    if (mm == NULL || (p->flags & PF_KTHREAD) ||
        spin_is_locked(&mm->page_table_lock))
        goto out;

    if (!down_read_trylock(&mm->mmap_sem))
        goto out;

    atomic_inc(&mm->mm_users);

    /*
     * The following construct can be replaced with:
     *      tos = current_user_stack_pointer();
     * once support for 4.0 is no longer necessary.
     */
#ifdef CONFIG_X86_64
    tos = current_pt_regs()->sp;
#else
    tos = user_stack_pointer(current_pt_regs());
#endif
    stack_vma = find_user_vma(p, mm, NULL, (unsigned long) tos, 0);
    if (!stack_vma || stack_vma->vm_start > (unsigned long) tos)
        goto unlock_out;

#ifdef CONFIG_STACK_GROWSUP
#error This code does not yet work on STACK_GROWSUP platforms.
#endif
    bos = stack_vma->vm_end;
    if (stack_guard_page_end(stack_vma, bos))
        bos -= PAGE_SIZE;

    /*
     * If we have a pcstack, loop as long as we are within the stack limit.
     * Otherwise, loop until we run out of stack.
     */
    for (sp = (unsigned long *)tos;
         sp <= (unsigned long *)bos &&
         ((pcstack && pcstack_limit > 0) || !pcstack);
         sp++) {
        struct vm_area_struct *code_vma;
        unsigned long addr;

        /*
         * Recheck for faultedness and pin at page boundaries.
         */
        if (!stack_page || (((unsigned long)sp & PAGE_MASK) == 0)) {
            if (stack_page) {
                put_page(stack_page);
                stack_page = NULL;
            }

            if (!find_user_vma(p, mm, &stack_page,
                       (unsigned long) sp, 1))
                break;
        }

        DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
        get_user(addr, sp);
        DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

        if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) {
            DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_BADADDR);
            break;
        }

        if (addr == fpc)
            continue;

        code_vma = find_user_vma(p, mm, NULL, addr, 0);
        if (!code_vma || code_vma->vm_start > addr)
            continue;

        if ((addr >= tos && addr <= bos) ||
            (code_vma->vm_flags & VM_GROWSDOWN)) {
            /* stack address - may need it for the fpstack. */
        } else if (code_vma->vm_flags & VM_EXEC) {
            if (pcstack) {
                *pcstack++ = addr;
                pcstack_limit--;
            }
            depth++;
        }
    }

    if (stack_page != NULL)
        put_page(stack_page);

unlock_out:
    atomic_dec(&mm->mm_users);
    up_read(&mm->mmap_sem);

out:
    if (pcstack)
        while (pcstack_limit--)
            *pcstack++ = 0;

    return depth;
}
asmlinkage struct pt_regs *do_debug(struct pt_regs *regs)
{
    struct thread_info *ti;
    unsigned long trampoline_addr;
    u32 status;
    u32 ctrl;
    int code;

    status = ocd_read(DS);
    ti = current_thread_info();
    code = TRAP_BRKPT;

    pr_debug("do_debug: status=0x%08x PC=0x%08lx SR=0x%08lx tif=0x%08lx\n",
         status, regs->pc, regs->sr, ti->flags);

    if (!user_mode(regs)) {
        unsigned long die_val = DIE_BREAKPOINT;

        if (status & (1 << OCD_DS_SSS_BIT))
            die_val = DIE_SSTEP;

        if (notify_die(die_val, "ptrace", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
            return regs;

        if ((status & (1 << OCD_DS_SWB_BIT)) &&
            test_and_clear_ti_thread_flag(ti, TIF_BREAKPOINT)) {
            /*
             * Explicit breakpoint from trampoline or
             * exception/syscall/interrupt handler.
             *
             * The real saved regs are on the stack right
             * after the ones we saved on entry.
             */
            regs++;
            pr_debug(" -> TIF_BREAKPOINT done, adjusted regs:"
                 "PC=0x%08lx SR=0x%08lx\n",
                 regs->pc, regs->sr);
            BUG_ON(!user_mode(regs));

            if (test_thread_flag(TIF_SINGLE_STEP)) {
                pr_debug("Going to do single step...\n");
                return regs;
            }

            /*
             * No TIF_SINGLE_STEP means we're done
             * stepping over a syscall. Do the trap now.
             */
            code = TRAP_TRACE;
        } else if ((status & (1 << OCD_DS_SSS_BIT)) &&
               test_ti_thread_flag(ti, TIF_SINGLE_STEP)) {
            pr_debug("Stepped into something, "
                 "setting TIF_BREAKPOINT...\n");
            set_ti_thread_flag(ti, TIF_BREAKPOINT);

            /*
             * We stepped into an exception, interrupt or
             * syscall handler. Some exception handlers
             * don't check for pending work, so we need to
             * set up a trampoline just in case.
             *
             * The exception entry code will undo the
             * trampoline stuff if it does a full context
             * save (which also means that it'll check for
             * pending work later.)
             */
            if ((regs->sr & MODE_MASK) == MODE_EXCEPTION) {
                trampoline_addr = (unsigned long)&debug_trampoline;

                pr_debug("Setting up trampoline...\n");
                ti->rar_saved = sysreg_read(RAR_EX);
                ti->rsr_saved = sysreg_read(RSR_EX);
                sysreg_write(RAR_EX, trampoline_addr);
                sysreg_write(RSR_EX,
                    (MODE_EXCEPTION | SR_EM | SR_GM));
                BUG_ON(ti->rsr_saved & MODE_MASK);
            }

            /*
             * If we stepped into a system call, we
             * shouldn't do a single step after we return
             * since the return address is right after the
             * "scall" instruction we were told to step
             * over.
             */
            if ((regs->sr & MODE_MASK) == MODE_SUPERVISOR) {
                pr_debug("Supervisor; no single step\n");
                clear_ti_thread_flag(ti, TIF_SINGLE_STEP);
            }

            ctrl = ocd_read(DC);
            ctrl &= ~(1 << OCD_DC_SS_BIT);
            ocd_write(DC, ctrl);

            return regs;
        } else {
            printk(KERN_ERR "Unexpected OCD_DS value: 0x%08x\n", status);
            printk(KERN_ERR "Thread flags: 0x%08lx\n", ti->flags);
            die("Unhandled debug trap in kernel mode", regs, SIGTRAP);
        }
    } else if (status & (1 << OCD_DS_SSS_BIT)) {
        /* Single step in user mode */
        code = TRAP_TRACE;

        ctrl = ocd_read(DC);
        ctrl &= ~(1 << OCD_DC_SS_BIT);
        ocd_write(DC, ctrl);
    }

    pr_debug("Sending SIGTRAP: code=%d PC=0x%08lx SR=0x%08lx\n",
         code, regs->pc, regs->sr);

    clear_thread_flag(TIF_SINGLE_STEP);
    _exception(SIGTRAP, regs, code, instruction_pointer(regs));

    return regs;
}
int do_page_fault(unsigned long addr, int error_code, struct pt_regs *regs)
{
    struct task_struct *tsk;
    struct mm_struct *mm;
    int fault;

#if defined(CONFIG_KGDB)
    if (kgdb_fault_expected)
        kgdb_handle_bus_error();
#endif

    tsk = current;
    mm = tsk->mm;

    /*
     * If we're in an interrupt or have no user
     * context, we must not take the fault..
     */
    if (in_interrupt() || !mm)
        goto no_context;

    TRACE_TRAP_ENTRY(14, instruction_pointer(regs));

    down_read(&mm->mmap_sem);
    fault = __do_page_fault(mm, addr, error_code, tsk);
    up_read(&mm->mmap_sem);

    TRACE_EVENT(TRACE_EV_TRAP_EXIT, NULL);

    /*
     * Handle the "normal" case first
     */
    if (fault > 0)
        return 0;

    /*
     * We had some memory, but were unable to
     * successfully fix up this page fault.
     */
    if (fault == 0)
        goto do_sigbus;

    /*
     * If we are in kernel mode at this point, we
     * have no context to handle this fault with.
     */
    if (!user_mode(regs))
        goto no_context;

    if (fault == -3) {
        /*
         * We ran out of memory, or some other thing happened to
         * us that made us unable to handle the page fault gracefully.
         */
        printk("VM: killing process %s\n", tsk->comm);
        do_exit(SIGKILL);
    } else
        __do_user_fault(tsk, addr, error_code,
                fault == -1 ? SEGV_ACCERR : SEGV_MAPERR, regs);
    return 0;

    /*
     * We ran out of memory, or some other thing happened to us that made
     * us unable to handle the page fault gracefully.
     */
do_sigbus:
    /*
     * Send a sigbus, regardless of whether we were in kernel
     * or user mode.
     */
    tsk->thread.address = addr;
    tsk->thread.error_code = error_code;
    tsk->thread.trap_no = 14;
    force_sig(SIGBUS, tsk);
#ifdef CONFIG_DEBUG_USER
    printk(KERN_DEBUG "%s: sigbus at 0x%08lx, pc=0x%08lx\n",
           current->comm, addr, instruction_pointer(regs));
#endif

    /* Kernel mode? Handle exceptions or die */
    if (user_mode(regs))
        return 0;

no_context:
    __do_kernel_fault(mm, addr, error_code, regs);
    return 0;
}
int do_alignment(unsigned long addr, int error_code, struct pt_regs *regs)
{
    union offset_union offset;
    unsigned long instr, instrptr;
    int (*handler)(unsigned long addr, unsigned long instr,
               struct pt_regs *regs);
    unsigned int type;

    instrptr = instruction_pointer(regs);
    instr = *(unsigned long *)instrptr;

    if (user_mode(regs))
        goto user;

    ai_sys += 1;

fixup:
    regs->ARM_pc += 4;

    switch (CODING_BITS(instr)) {
    case 0x00000000:        /* ldrh or strh */
        if (LDSTH_I_BIT(instr))
            offset.un = (instr & 0xf00) >> 4 | (instr & 15);
        else
            offset.un = regs->uregs[RM_BITS(instr)];
        handler = do_alignment_ldrhstrh;
        break;

    case 0x04000000:        /* ldr or str immediate */
        offset.un = OFFSET_BITS(instr);
        handler = do_alignment_ldrstr;
        break;

    case 0x06000000:        /* ldr or str register */
        offset.un = regs->uregs[RM_BITS(instr)];

        if (IS_SHIFT(instr)) {
            unsigned int shiftval = SHIFT_BITS(instr);

            switch (SHIFT_TYPE(instr)) {
            case SHIFT_LSL:
                offset.un <<= shiftval;
                break;

            case SHIFT_LSR:
                offset.un >>= shiftval;
                break;

            case SHIFT_ASR:
                offset.sn >>= shiftval;
                break;

            case SHIFT_RORRRX:
                if (shiftval == 0) {
                    offset.un >>= 1;
                    if (regs->ARM_cpsr & PSR_C_BIT)
                        offset.un |= 1 << 31;
                } else
                    offset.un = offset.un >> shiftval |
                            offset.un << (32 - shiftval);
                break;
            }
static int do_alignment_exception(struct pt_regs *regs)
{
    unsigned int instr, rd, rn, correction, nr_regs, regbits;
    unsigned long eaddr;
    union { unsigned long un; signed long sn; } offset;

    if (user_mode(regs)) {
        set_cr(cr_no_alignment);
        ai_user += 1;
        return 0;
    }

    ai_sys += 1;

    instr = *(unsigned long *)instruction_pointer(regs);
    correction = 4; /* sometimes 8 on ARMv3 */
    regs->ARM_pc += correction + 4;

    rd = RD_BITS(instr);
    rn = RN_BITS(instr);
    eaddr = regs->uregs[rn];

    switch (CODING_BITS(instr)) {
    case 0x00000000:
        if ((instr & 0x0ff00ff0) == 0x01000090) {
            ai_skipped += 1;
            printk(KERN_ERR "Unaligned trap: not handling swp instruction\n");
            return 1;
        }

        if (((instr & 0x0e000090) == 0x00000090) && (instr & 0x60) != 0) {
            ai_half += 1;
            if (LDSTH_I_BIT(instr))
                offset.un = (instr & 0xf00) >> 4 | (instr & 15);
            else
                offset.un = regs->uregs[RM_BITS(instr)];

            if (LDST_P_BIT(instr)) {
                if (LDST_U_BIT(instr))
                    eaddr += offset.un;
                else
                    eaddr -= offset.un;
            }

            if (LDST_L_BIT(instr))
                regs->uregs[rd] =
                    get_unaligned((unsigned short *)eaddr);
            else
                put_unaligned(regs->uregs[rd],
                          (unsigned short *)eaddr);

            /* signed half-word? */
            if (instr & 0x40)
                regs->uregs[rd] = (long)((short) regs->uregs[rd]);

            if (!LDST_P_BIT(instr)) {
                if (LDST_U_BIT(instr))
                    eaddr += offset.un;
                else
                    eaddr -= offset.un;
                regs->uregs[rn] = eaddr;
            } else if (LDST_W_BIT(instr))
                regs->uregs[rn] = eaddr;
            break;
        }
/* Start a stopped thread and stop it again immediately. The function replaces
 * the instruction at the PC to a breakpoint instruction, starts the thread and
 * waits for the thread to receive a SIGTRAP (or SIGILL) signal. After the
 * thread stops, the patched instruction is reverted.
 *
 * The function checks if the address pointed by the PC is actually inside an
 * executable memory segment. If it isn't, the function restarts the thread
 * without patching any instructions. The address pointed by the PC is
 * illegal, so the thread should receive a SIGSEGV signal in this case.
 *
 * The function must be called with all threads of the process stopped.
 * Moreover, the given process must still be alive (i.e. not a zombie).
 */
static bool fncall_restop(thread_t *thread)
{
    int patch;
    int signo;
    struct pt_regs regs[2];
    /* Original instruction. */
    insn_t insn;
    insn_kind_t kind;
    address_t address;

    assert(!thread->state.slavemode);
    thread->state.slavemode = true;

    info("Restopping thread %s...", str_thread(thread));

    /* Save current register values. */
    if (!thread_get_regs(thread, &regs[0]))
        goto fail;

    /* Get the current PC. */
    address = instruction_pointer(&regs[0]);
    patch = procfs_address_executable(thread, address);
    if (patch) {
        /* Read the original instruction. */
        kind = is_thumb_mode(&regs[0]) ? INSN_KIND_THUMB : INSN_KIND_ARM;
        if (!patch_read_insn_detect_kind(thread, address, &insn, &kind))
            goto fail;

        verbose("The PC register in thread %s points at address %p, "
                "which contains %s instruction '%s'. It will "
                "be temporary replaced by a breakpoint instruction. The thread "
                "will then be restarted. A SIGILL or SIGTRAP signal should be "
                "received immediately after thread restart.",
                str_thread(thread), (void *) address,
                insn_kind_str(kind), arm_disassemble(insn, kind));

        /* Patch instruction. */
        if (!patch_insn(thread, address, kind, get_breakpoint_insn(kind)))
            goto fail;
    } else {
        /* The address pointed by the PC is invalid. Running the thread should
         * cause a SIGSEGV. */
        verbose("The PC register in thread %s points at address %p, "
                "which is invalid (not inside a executable memory segment). "
                "It will be restarted without any patching. A SIGSEGV signal "
                "should be received immediately after thread restart.",
                str_thread(thread), (void *) address);
    }

    thread_continue(thread, 0);

    /* Wait until the program is stopped by a signal. */
    while (1) {
        thread_wait(thread, false);
        if (thread->state.dead)
            goto fail;
        if ((thread->state.signo == SIGSEGV) ||
            (thread->state.signo == SIGTRAP) ||
            (thread->state.signo == SIGBUS)) {
            break;
        }
        warning("Unexpected signal %s received during restopping of thread %s. "
                "The signal will be delivered.",
                str_signal(thread->state.signo), str_thread(thread));
        /* Deliver the signal. */
        thread_continue(thread, 0);
    }
    signo = thread->state.signo;
    thread->state.signo = 0;

    /* The process stopped, read new register values. */
    if (!thread_get_regs(thread, &regs[1]))
        goto fail;

    /* Warn about any abnormalities. */
    if (regs[1].ARM_pc != regs[0].ARM_pc) {
        warning("Unexpected change of PC register during restopping of %s.",
                str_thread(thread));
    }
    if (regs[1].ARM_lr != regs[0].ARM_lr) {
        warning("Unexpected change of LR register during restopping of %s.",
                str_thread(thread));
    }
    if (regs[1].ARM_sp != regs[0].ARM_sp) {
        warning("Unexpected change of SP register during restopping of %s.",
                str_thread(thread));
    }

    /* Restore old register values. */
    if (!thread_set_regs(thread, &regs[0]))
        goto fail;

    if (patch) {
        if ((signo != SIGILL) && (signo != SIGTRAP)) {
            warning("Thread %s was stopped by unexpected signal %s. Ignoring.",
                    str_thread(thread), str_signal(signo));
        }
        /* Revert original instruction */
        patch_insn(thread, address, kind, insn);
    } else {
        if ((signo != SIGSEGV)) {
            warning("%s was stopped by unexpected signal %s. Ignoring.",
                    str_thread(thread), str_signal(signo));
        }
    }

    info("Finished restop of %s.", str_thread(thread));
    thread->state.slavemode = false;

    if (!thread->state.dead)
        return true;

fail:
    assert(thread->state.dead);
    error("Thread %s died during restop operation.", str_thread(thread));
    return false;
}
void do_perfctr_interrupt(struct pt_regs *regs)
{
    preempt_disable();
    (*perfctr_ihandler)(instruction_pointer(regs));
    preempt_enable_no_resched();
}
unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
    return instruction_pointer(regs);
}
void probe_kernel_crash_kexec(void *_data, struct kimage *image,
                  struct pt_regs *regs)
{
    trace_mark_tp(kernel, crash_kexec, kernel_crash_kexec,
        probe_kernel_crash_kexec, "image %p ip %p", image,
        regs ? (void *)instruction_pointer(regs) : NULL);
}
unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
    if (exception == 3)
        return instruction_pointer(regs) - 1;
    return instruction_pointer(regs);
}
asmlinkage void do_page_fault(unsigned long address, struct pt_regs *regs,
                  int protection, int writeaccess)
{
    struct task_struct *tsk;
    struct mm_struct *mm;
    struct vm_area_struct *vma;
    siginfo_t info;
    int fault;
    unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

    D(printk(KERN_DEBUG
         "Page fault for %lX on %X at %lX, prot %d write %d\n",
         address, smp_processor_id(), instruction_pointer(regs),
         protection, writeaccess));

    tsk = current;

    /*
     * We fault-in kernel-space virtual memory on-demand. The
     * 'reference' page table is init_mm.pgd.
     *
     * NOTE! We MUST NOT take any locks for this case. We may
     * be in an interrupt or a critical region, and should
     * only copy the information from the master page table,
     * nothing more.
     *
     * NOTE2: This is done so that, when updating the vmalloc
     * mappings we don't have to walk all processes pgdirs and
     * add the high mappings all at once. Instead we do it as they
     * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
     * bit set so sometimes the TLB can use a lingering entry.
     *
     * This verifies that the fault happens in kernel space
     * and that the fault was not a protection error (error_code & 1).
     */
    if (address >= VMALLOC_START &&
        !protection &&
        !user_mode(regs))
        goto vmalloc_fault;

    /* When stack execution is not allowed we store the signal
     * trampolines in the reserved cris_signal_return_page.
     * Handle this in the exact same way as vmalloc (we know
     * that the mapping is there and is valid so no need to
     * call handle_mm_fault).
     */
    if (cris_signal_return_page &&
        address == cris_signal_return_page &&
        !protection && user_mode(regs))
        goto vmalloc_fault;

    /* we can and should enable interrupts at this point */
    local_irq_enable();

    mm = tsk->mm;
    info.si_code = SEGV_MAPERR;

    /*
     * If we're in an interrupt or "atomic" operation or have no
     * user context, we must not take the fault.
     */
    if (!mm || pagefault_disabled())
        goto no_context;

    if (user_mode(regs))
        flags |= FAULT_FLAG_USER;
retry:
    down_read(&mm->mmap_sem);
    vma = find_vma(mm, address);
    if (!vma)
        goto bad_area;
    if (vma->vm_start <= address)
        goto good_area;
    if (!(vma->vm_flags & VM_GROWSDOWN))
        goto bad_area;
    if (user_mode(regs)) {
        /*
         * accessing the stack below usp is always a bug.
         * we get page-aligned addresses so we can only check
         * if we're within a page from usp, but that might be
         * enough to catch brutal errors at least.
         */
        if (address + PAGE_SIZE < rdusp())
            goto bad_area;
    }
    if (expand_stack(vma, address))
        goto bad_area;

    /*
     * Ok, we have a good vm_area for this memory access, so
     * we can handle it..
     */
good_area:
    info.si_code = SEGV_ACCERR;

    /* first do some preliminary protection checks */
    if (writeaccess == 2) {
        if (!(vma->vm_flags & VM_EXEC))
            goto bad_area;
    } else if (writeaccess == 1) {
        if (!(vma->vm_flags & VM_WRITE))
            goto bad_area;
        flags |= FAULT_FLAG_WRITE;
    } else {
        if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
            goto bad_area;
    }

    /*
     * If for any reason at all we couldn't handle the fault,
     * make sure we exit gracefully rather than endlessly redo
     * the fault.
     */
    fault = handle_mm_fault(mm, vma, address, flags);

    if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
        return;

    if (unlikely(fault & VM_FAULT_ERROR)) {
        if (fault & VM_FAULT_OOM)
            goto out_of_memory;
        else if (fault & VM_FAULT_SIGBUS)
            goto do_sigbus;
        BUG();
    }

    if (flags & FAULT_FLAG_ALLOW_RETRY) {
        if (fault & VM_FAULT_MAJOR)
            tsk->maj_flt++;
        else
            tsk->min_flt++;
        if (fault & VM_FAULT_RETRY) {
            flags &= ~FAULT_FLAG_ALLOW_RETRY;
            flags |= FAULT_FLAG_TRIED;

            /*
             * No need to up_read(&mm->mmap_sem) as we would
             * have already released it in __lock_page_or_retry
             * in mm/filemap.c.
             */
            goto retry;
        }
    }

    up_read(&mm->mmap_sem);
    return;

    /*
     * Something tried to access memory that isn't in our memory map..
     * Fix it, but check if it's kernel or user first..
     */
bad_area:
    up_read(&mm->mmap_sem);

bad_area_nosemaphore:
    DPG(show_registers(regs));

    /* User mode accesses just cause a SIGSEGV */
    if (user_mode(regs)) {
        printk(KERN_NOTICE "%s (pid %d) segfaults for page "
               "address %08lx at pc %08lx\n",
               tsk->comm, tsk->pid, address, instruction_pointer(regs));

        /* With DPG on, we've already dumped registers above. */
        DPG(if (0))
            show_registers(regs);

#ifdef CONFIG_NO_SEGFAULT_TERMINATION
        DECLARE_WAIT_QUEUE_HEAD(wq);
        wait_event_interruptible(wq, 0 == 1);
#else
        info.si_signo = SIGSEGV;
        info.si_errno = 0;
        /* info.si_code has been set above */
        info.si_addr = (void *)address;
        force_sig_info(SIGSEGV, &info, tsk);
#endif
        return;
    }

no_context:
    /* Are we prepared to handle this kernel fault?
     *
     * (The kernel has valid exception-points in the source
     * when it accesses user-memory. When it fails in one
     * of those points, we find it in a table and do a jump
     * to some fixup code that loads an appropriate error
     * code)
     */
    if (find_fixup_code(regs))
        return;

    /*
     * Oops. The kernel tried to access some bad page. We'll have to
     * terminate things with extreme prejudice.
     */
    if (!oops_in_progress) {
        oops_in_progress = 1;
        if ((unsigned long) (address) < PAGE_SIZE)
            printk(KERN_ALERT "Unable to handle kernel NULL "
                "pointer dereference");
        else
            printk(KERN_ALERT "Unable to handle kernel access"
                " at virtual address %08lx\n", address);

        die_if_kernel("Oops", regs, (writeaccess << 1) | protection);
        oops_in_progress = 0;
    }

    do_exit(SIGKILL);

    /*
     * We ran out of memory, or some other thing happened to us that made
     * us unable to handle the page fault gracefully.
     */
out_of_memory:
    up_read(&mm->mmap_sem);
    if (!user_mode(regs))
        goto no_context;
    pagefault_out_of_memory();
    return;

do_sigbus:
    up_read(&mm->mmap_sem);

    /*
     * Send a sigbus, regardless of whether we were in kernel
     * or user mode.
     */
    info.si_signo = SIGBUS;
    info.si_errno = 0;
    info.si_code = BUS_ADRERR;
    info.si_addr = (void *)address;
    force_sig_info(SIGBUS, &info, tsk);

    /* Kernel mode? Handle exceptions or die */
    if (!user_mode(regs))
        goto no_context;
    return;

vmalloc_fault:
    {
        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         *
         * Use current_pgd instead of tsk->active_mm->pgd
         * since the latter might be unavailable if this
         * code is executed in a misfortunately run irq
         * (like inside schedule() between switch_mm and
         * switch_to...).
         */
        int offset = pgd_index(address);
        pgd_t *pgd, *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;
        pte_t *pte_k;

        pgd = (pgd_t *)per_cpu(current_pgd, smp_processor_id()) + offset;
        pgd_k = init_mm.pgd + offset;

        /* Since we're two-level, we don't need to do both
         * set_pgd and set_pmd (they do the same thing). If
         * we go three-level at some point, do the right thing
         * with pgd_present and set_pgd here.
         *
         * Also, since the vmalloc area is global, we don't
         * need to copy individual PTE's, it is enough to
         * copy the pgd pointer into the pte page of the
         * root task. If that is there, we'll find our pte if
         * it exists.
         */
        pud = pud_offset(pgd, address);
        pud_k = pud_offset(pgd_k, address);
        if (!pud_present(*pud_k))
            goto no_context;

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);

        if (!pmd_present(*pmd_k))
            goto bad_area_nosemaphore;

        set_pmd(pmd, *pmd_k);

        /* Make sure the actual PTE exists as well to
         * catch kernel vmalloc-area accesses to non-mapped
         * addresses. If we don't do this, this will just
         * silently loop forever.
         */
        pte_k = pte_offset_kernel(pmd_k, address);
        if (!pte_present(*pte_k))
            goto no_context;

        return;
    }
}
static void read_all_sources(struct pt_regs *regs, struct task_struct *task)
{
    u32 state, extra_data = 0;
    int i, vec_idx = 0, bt_size = 0;
    int nr_events = 0, nr_positive_events = 0;
    struct pt_regs *user_regs;
    struct quadd_iovec vec[5];
    struct hrt_event_value events[QUADD_MAX_COUNTERS];
    u32 events_extra[QUADD_MAX_COUNTERS];
    struct quadd_record_data record_data;
    struct quadd_sample_data *s = &record_data.sample;
    struct quadd_ctx *ctx = hrt.quadd_ctx;
    struct quadd_cpu_context *cpu_ctx = this_cpu_ptr(hrt.cpu_ctx);
    struct quadd_callchain *cc = &cpu_ctx->cc;

    if (!regs)
        return;

    if (atomic_read(&cpu_ctx->nr_active) == 0)
        return;

    if (!task)
        task = current;

    rcu_read_lock();
    if (!task_nsproxy(task)) {
        rcu_read_unlock();
        return;
    }
    rcu_read_unlock();

    if (ctx->pmu && ctx->pmu_info.active)
        nr_events += read_source(ctx->pmu, regs,
                     events, QUADD_MAX_COUNTERS);

    if (ctx->pl310 && ctx->pl310_info.active)
        nr_events += read_source(ctx->pl310, regs,
                     events + nr_events,
                     QUADD_MAX_COUNTERS - nr_events);

    if (!nr_events)
        return;

    if (user_mode(regs))
        user_regs = regs;
    else
        user_regs = current_pt_regs();

    if (get_sample_data(s, regs, task))
        return;

    vec[vec_idx].base = &extra_data;
    vec[vec_idx].len = sizeof(extra_data);
    vec_idx++;

    s->reserved = 0;

    if (ctx->param.backtrace) {
        cc->unw_method = hrt.unw_method;
        bt_size = quadd_get_user_callchain(user_regs, cc, ctx, task);

        if (!bt_size && !user_mode(regs)) {
            unsigned long pc = instruction_pointer(user_regs);

            cc->nr = 0;
#ifdef CONFIG_ARM64
            cc->cs_64 = compat_user_mode(user_regs) ? 0 : 1;
#else
            cc->cs_64 = 0;
#endif
            bt_size += quadd_callchain_store(cc, pc,
                             QUADD_UNW_TYPE_KCTX);
        }

        if (bt_size > 0) {
            int ip_size = cc->cs_64 ? sizeof(u64) : sizeof(u32);
            int nr_types = DIV_ROUND_UP(bt_size, 8);

            vec[vec_idx].base = cc->cs_64 ?
                (void *)cc->ip_64 : (void *)cc->ip_32;
            vec[vec_idx].len = bt_size * ip_size;
            vec_idx++;

            vec[vec_idx].base = cc->types;
            vec[vec_idx].len = nr_types * sizeof(cc->types[0]);
            vec_idx++;

            if (cc->cs_64)
                extra_data |= QUADD_SED_IP64;
        }

        extra_data |= cc->unw_method << QUADD_SED_UNW_METHOD_SHIFT;
        s->reserved |= cc->unw_rc << QUADD_SAMPLE_URC_SHIFT;
    }
    s->callchain_nr = bt_size;

    record_data.record_type = QUADD_RECORD_TYPE_SAMPLE;

    s->events_flags = 0;
    for (i = 0; i < nr_events; i++) {
        u32 value = events[i].value;

        if (value > 0) {
            s->events_flags |= 1 << i;
            events_extra[nr_positive_events++] = value;
        }
    }

    if (nr_positive_events == 0)
        return;

    vec[vec_idx].base = events_extra;
    vec[vec_idx].len = nr_positive_events * sizeof(events_extra[0]);
    vec_idx++;

    state = task->state;
    if (state) {
        s->state = 1;
        vec[vec_idx].base = &state;
        vec[vec_idx].len = sizeof(state);
        vec_idx++;
    } else {
        s->state = 0;
    }

    quadd_put_sample(&record_data, vec, vec_idx);
}
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
}

/*
 * Handle hitting a breakpoint.
 */
static int ptrace_break(struct pt_regs *regs)
{
    siginfo_t info = {
        .si_signo = SIGTRAP,
        .si_errno = 0,
        .si_code = TRAP_BRKPT,
        .si_addr = (void __user *)instruction_pointer(regs),
    };

    force_sig_info(SIGTRAP, &info, current);
    return 0;
}

static int arm64_break_trap(unsigned long addr, unsigned int esr,
                struct pt_regs *regs)
{
    return ptrace_break(regs);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
/*
 * Debug exception handlers.
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
                  struct pt_regs *regs)
{
    int i, step = 0, *kernel_step;
    u32 ctrl_reg;
    u64 addr, val;
    struct perf_event *bp, **slots;
    struct debug_info *debug_info;
    struct arch_hw_breakpoint_ctrl ctrl;

    slots = this_cpu_ptr(bp_on_reg);
    addr = instruction_pointer(regs);
    debug_info = &current->thread.debug;

    for (i = 0; i < core_num_brps; ++i) {
        rcu_read_lock();

        bp = slots[i];

        if (bp == NULL)
            goto unlock;

        /* Check if the breakpoint value matches. */
        val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
        if (val != (addr & ~0x3))
            goto unlock;

        /* Possible match, check the byte address select to confirm. */
        ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
        decode_ctrl_reg(ctrl_reg, &ctrl);
        if (!((1 << (addr & 0x3)) & ctrl.len))
            goto unlock;

        counter_arch_bp(bp)->trigger = addr;
        perf_bp_event(bp, regs);

        /* Do we need to handle the stepping? */
        if (!bp->overflow_handler)
            step = 1;
unlock:
        rcu_read_unlock();
    }

    if (!step)
        return 0;

    if (user_mode(regs)) {
        debug_info->bps_disabled = 1;
        toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

        /* If we're already stepping a watchpoint, just return. */
        if (debug_info->wps_disabled)
            return 0;

        if (test_thread_flag(TIF_SINGLESTEP))
            debug_info->suspended_step = 1;
        else
            user_enable_single_step(current);
    } else {
        toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
        kernel_step = this_cpu_ptr(&stepping_kernel_bp);

        if (*kernel_step != ARM_KERNEL_STEP_NONE)
            return 0;

        if (kernel_active_single_step()) {
            *kernel_step = ARM_KERNEL_STEP_SUSPEND;
        } else {
            *kernel_step = ARM_KERNEL_STEP_ACTIVE;
            kernel_enable_single_step(regs);
        }
    }

    return 0;
}
int __ipipe_handle_exception(struct pt_regs *regs, long error_code, int vector)
{
    bool root_entry = false;
    unsigned long flags = 0;
    unsigned long cr2 = 0;

    if (ipipe_root_domain_p) {
        root_entry = true;

        local_save_flags(flags);
        /*
         * Replicate hw interrupt state into the virtual mask
         * before calling the I-pipe event handler over the
         * root domain. Also required later when calling the
         * Linux exception handler.
         */
        if (irqs_disabled_hw())
            local_irq_disable();
    }
#ifdef CONFIG_KGDB
    /* catch exception KGDB is interested in over non-root domains */
    else if (__ipipe_xlate_signo[vector] >= 0 &&
         !kgdb_handle_exception(vector, __ipipe_xlate_signo[vector],
                    error_code, regs))
        return 1;
#endif /* CONFIG_KGDB */

    if (vector == ex_do_page_fault)
        cr2 = native_read_cr2();

    if (unlikely(ipipe_trap_notify(vector, regs))) {
        if (root_entry)
            local_irq_restore_nosync(flags);
        return 1;
    }

    if (likely(ipipe_root_domain_p)) {
        /*
         * If root is not the topmost domain or in case we faulted in
         * the iret path of x86-32, regs.flags does not match the root
         * domain state. The fault handler or the low-level return
         * code may evaluate it. So fix this up, either by the root
         * state sampled on entry or, if we migrated to root, with the
         * current state.
         */
        __fixup_if(root_entry ? raw_irqs_disabled_flags(flags) :
                    raw_irqs_disabled(), regs);
    } else {
        /* Detect unhandled faults over non-root domains. */
        struct ipipe_domain *ipd = ipipe_current_domain;

        /* Switch to root so that Linux can handle the fault cleanly. */
        __ipipe_current_domain = ipipe_root_domain;

        ipipe_trace_panic_freeze();

        /* Always warn about user land and unfixable faults. */
        if (user_mode_vm(regs) ||
            !search_exception_tables(instruction_pointer(regs))) {
            printk(KERN_ERR "BUG: Unhandled exception over domain"
                   " %s at 0x%lx - switching to ROOT\n",
                   ipd->name, instruction_pointer(regs));
            dump_stack();
            ipipe_trace_panic_dump();
#ifdef CONFIG_IPIPE_DEBUG
        /* Also report fixable ones when debugging is enabled. */
        } else {
            printk(KERN_WARNING "WARNING: Fixable exception over "
                   "domain %s at 0x%lx - switching to ROOT\n",
                   ipd->name, instruction_pointer(regs));
            dump_stack();
            ipipe_trace_panic_dump();
#endif /* CONFIG_IPIPE_DEBUG */
        }
    }

    if (vector == ex_do_page_fault)
        write_cr2(cr2);

    __ipipe_std_extable[vector](regs, error_code);

    /*
     * Relevant for 64-bit: Restore root domain state as the low-level
     * return code will not align it to regs.flags.
     */
    if (root_entry)
        local_irq_restore_nosync(flags);

    return 0;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
    instruction_pointer(regs) = ip;
}
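On ports where instruction_pointer() does not expand to an assignable lvalue, the equivalent is written with the setter helper instead, as the kgdbts example above does. A minimal sketch, assuming instruction_pointer_set() is provided:

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
    /* Update the saved PC through the accessor instead of assigning. */
    instruction_pointer_set(regs, ip);
}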