static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
	else
		clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	child->exit_code = data;
	wake_up_state(child, __TASK_TRACED);

	return 0;
}
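/*
 * Illustrative sketch (not part of the kernel source above): a minimal
 * userspace tracer that exercises the resume path handled by
 * ptrace_resume(). PTRACE_SINGLESTEP ends up in user_enable_single_step()
 * for the child, while PTRACE_CONT goes through the same function with
 * single-stepping disabled. The traced program and the step count are
 * arbitrary, and error handling is trimmed for brevity.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		/* Child: ask to be traced, then exec an arbitrary target. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		_exit(1);
	}

	/* Parent: wait for the post-exec stop, then single-step a few times. */
	int status;
	waitpid(pid, &status, 0);

	for (int i = 0; i < 5 && WIFSTOPPED(status); i++) {
		/* data == 0: do not deliver a signal when resuming. */
		ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
		waitpid(pid, &status, 0);
	}

	/* Let the child run to completion. */
	if (WIFSTOPPED(status)) {
		ptrace(PTRACE_CONT, pid, NULL, NULL);
		waitpid(pid, &status, 0);
	}

	return 0;
}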
/*
 * arch_uprobe_pre_xol - prepare to execute out of line.
 * @auprobe: the probepoint information.
 * @regs: reflects the saved user state of current task.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct arch_uprobe_task *autask = &current->utask->autask;

	autask->saved_trap_nr = current->thread.trap_nr;
	current->thread.trap_nr = UPROBE_TRAP_NR;
	regs->nip = current->utask->xol_vaddr;

	user_enable_single_step(current);
	return 0;
}
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/* Initialize with an invalid fault code to detect if ol insn trapped */
	current->thread.fault_code = UPROBE_INV_FAULT_CODE;

	/* Instruction points to execute ol */
	instruction_pointer_set(regs, utask->xol_vaddr);

	user_enable_single_step(current);

	return 0;
}
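/*
 * Illustrative sketch (not part of the kernel source above): registering a
 * uprobe through the tracefs uprobe_events interface. When the probed
 * instruction is hit, the kernel single-steps a copy of it out of line,
 * which is what the arch_uprobe_pre_xol() implementations above set up.
 * The binary path and offset below are placeholders; a real offset would
 * come from e.g. "objdump -d" or "perf probe". On some systems the mount
 * point is /sys/kernel/debug/tracing instead of /sys/kernel/tracing.
 */
#include <stdio.h>

int main(void)
{
	/* Hypothetical target: probe /bin/bash at file offset 0x4245c0. */
	const char *probe = "p:myprobes/demo /bin/bash:0x4245c0\n";

	FILE *f = fopen("/sys/kernel/tracing/uprobe_events", "a");
	if (!f) {
		perror("uprobe_events");
		return 1;
	}
	fputs(probe, f);
	fclose(f);

	/* Enable the new event; hits then show up in the trace buffer. */
	f = fopen("/sys/kernel/tracing/events/myprobes/demo/enable", "w");
	if (!f) {
		perror("enable");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);

	return 0;
}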
static int watchpoint_handler(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step, access;
	u32 ctrl_reg;
	u64 val, alignment_mask;
	struct perf_event *wp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(wp_on_reg);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/* AArch32 watchpoints are either 4 or 8 bytes aligned. */
		if (is_compat_task()) {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;
		} else {
			alignment_mask = 0x7;
		}

		/* Check if the watchpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
		if (val != (addr & ~alignment_mask))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & alignment_mask)) & ctrl.len))
			goto unlock;

		/*
		 * Check that the access type matches.
		 * 0 => load, otherwise => store
		 */
		access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
			 HW_BREAKPOINT_R;
		if (!(access & hw_breakpoint_type(wp)))
			goto unlock;

		info->trigger = addr;
		perf_bp_event(wp, regs);

		/* Do we need to handle the stepping? */
		if (!wp->overflow_handler)
			step = 1;

unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	/*
	 * We always disable EL0 watchpoints because the kernel can
	 * cause these to fire via an unprivileged access.
	 */
	toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

	if (user_mode(regs)) {
		debug_info->wps_disabled = 1;

		/* If we're already stepping a breakpoint, just return. */
		if (debug_info->bps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
/*
 * Debug exception handlers.
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step;
	u32 ctrl_reg;
	u64 addr, val;
	struct perf_event *bp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(bp_on_reg);
	addr = instruction_pointer(regs);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
		if (val != (addr & ~0x3))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & 0x3)) & ctrl.len))
			goto unlock;

		counter_arch_bp(bp)->trigger = addr;
		perf_bp_event(bp, regs);

		/* Do we need to handle the stepping? */
		if (!bp->overflow_handler)
			step = 1;

unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	if (user_mode(regs)) {
		debug_info->bps_disabled = 1;
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

		/* If we're already stepping a watchpoint, just return. */
		if (debug_info->wps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
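/*
 * Illustrative sketch (not part of the kernel source above): installing a
 * hardware data watchpoint from userspace through perf_event_open(), which
 * is one way events handled by watchpoint_handler() get registered
 * (HW_BREAKPOINT_X events would land in breakpoint_handler() instead).
 * The watched variable is an assumption for the example; a real tool would
 * typically watch an address in another process.
 */
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long watched;	/* data the watchpoint is set on */

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.size = sizeof(attr);
	attr.bp_type = HW_BREAKPOINT_W;		/* fire on writes */
	attr.bp_addr = (unsigned long)&watched;
	attr.bp_len = HW_BREAKPOINT_LEN_8;	/* 8-byte access */
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;

	/* Monitor the current thread on any CPU. */
	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	watched = 42;	/* this store should trigger the watchpoint */

	long long count = 0;
	read(fd, &count, sizeof(count));
	printf("watchpoint hits: %lld\n", count);

	close(fd);
	return 0;
}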