static int validate_hw_breakpoint(struct perf_event *bp)
{
	int ret;

	ret = arch_validate_hwbkpt_settings(bp);
	if (ret)
		return ret;

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
			return -EINVAL;
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}
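/*
 * Usage sketch (not from the original source): roughly how an in-kernel
 * caller could build an attribute that passes validate_hw_breakpoint()
 * above. It assumes the register_user_hw_breakpoint() variant that takes
 * a context pointer and a three-argument overflow handler; both signatures
 * changed across kernel versions. The handler, address and task pointer
 * are illustrative only.
 */
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/sched.h>

static void sample_bp_handler(struct perf_event *bp,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	pr_info("breakpoint hit at 0x%llx\n", bp->attr.bp_addr);
}

static struct perf_event *install_user_watchpoint(struct task_struct *tsk,
						  unsigned long addr)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);		/* PERF_TYPE_BREAKPOINT defaults */
	attr.bp_addr = addr;			/* user-space address */
	attr.bp_type = HW_BREAKPOINT_W;		/* write watchpoint */
	attr.bp_len  = HW_BREAKPOINT_LEN_4;

	/*
	 * Registration reaches validate_hw_breakpoint(); a kernel-space
	 * bp_addr would additionally require exclude_kernel == 0 and
	 * CAP_SYS_ADMIN.
	 */
	return register_user_hw_breakpoint(&attr, sample_bp_handler, NULL, tsk);
}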
/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * On AArch64, we only permit breakpoints of length 4, whereas
	 * AArch32 also requires breakpoints of length 2 for Thumb.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		if (is_compat_task()) {
			if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
			    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
				return -EINVAL;
		} else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
			/*
			 * FIXME: Some tools (I'm looking at you perf) assume
			 * that breakpoints should be sizeof(long). This
			 * is nonsense. For now, we fix up the parameter
			 * but we should probably return -EINVAL instead.
			 */
			info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		}
	}

	/* Address */
	info->address = bp->attr.bp_addr;

	/*
	 * Privilege
	 * Note that we disallow combined EL0/EL1 breakpoints because
	 * that would complicate the stepping code.
	 */
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
	else
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	return 0;
}
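/*
 * To illustrate the attr fields decoded above, a hedged user-space sketch
 * (not from the original source) that asks for a 4-byte write watchpoint
 * through perf_event_open(); on arm64 that attr reaches arch_build_bp_info()
 * as ARM_BREAKPOINT_STORE / ARM_BREAKPOINT_LEN_4 with EL0 privilege. The
 * watched variable and error handling are illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static int watched;	/* illustrative target variable */

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.size = sizeof(attr);
	attr.bp_type = HW_BREAKPOINT_W;		/* -> ARM_BREAKPOINT_STORE */
	attr.bp_len = HW_BREAKPOINT_LEN_4;	/* -> ARM_BREAKPOINT_LEN_4 */
	attr.bp_addr = (unsigned long)&watched;
	attr.sample_period = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0 /* this task */,
		     -1 /* any cpu */, -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	watched = 42;	/* write that trips the watchpoint */
	close(fd);
	return 0;
}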
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can then safely fetch
		 * the breakpoint, use its callback, touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removing.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (!arch_check_bp_in_kernelspace(bp)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}
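/*
 * For reference, the shape of the UBC operations the handler relies on,
 * inferred only from the calls above; the real structure in arch/sh may
 * carry additional members (per-channel enable/disable hooks, a name, a
 * trap number) that are not shown in this sketch.
 */
struct sh_ubc_ops_sketch {
	unsigned int	num_events;			/* number of UBC channels */
	unsigned long	(*triggered_mask)(void);	/* channels that fired */
	unsigned long	(*active_mask)(void);		/* channels currently enabled */
	void		(*disable_all)(void);
	void		(*enable_all)(unsigned long mask);
	void		(*clear_triggered_mask)(unsigned long mask);
};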