/* * Dump the debug register contents to the user. * We can't dump our per cpu values because it * may contain cpu wide breakpoint, something that * doesn't belong to the current task. * * TODO: include non-ptrace user breakpoints (perf) */ void aout_dump_debugregs(struct user *dump) { int i; int dr7 = 0; struct perf_event *bp; struct arch_hw_breakpoint *info; struct thread_struct *thread = ¤t->thread; for (i = 0; i < HBP_NUM; i++) { bp = thread->ptrace_bps[i]; if (bp && !bp->attr.disabled) { dump->u_debugreg[i] = bp->attr.bp_addr; info = counter_arch_bp(bp); dr7 |= encode_dr7(i, info->len, info->type); } else { dump->u_debugreg[i] = 0; } } dump->u_debugreg[4] = 0; dump->u_debugreg[5] = 0; dump->u_debugreg[6] = current->thread.debugreg6; dump->u_debugreg[7] = dr7; }
int arch_validate_hwbkpt_settings(struct perf_event *bp) { int ret = -EINVAL; struct arch_hw_breakpoint *info = counter_arch_bp(bp); if (!bp) return ret; switch (bp->attr.bp_type) { case HW_BREAKPOINT_R: info->type = DABR_DATA_READ; break; case HW_BREAKPOINT_W: info->type = DABR_DATA_WRITE; break; case HW_BREAKPOINT_R | HW_BREAKPOINT_W: info->type = (DABR_DATA_READ | DABR_DATA_WRITE); break; default: return ret; } info->address = bp->attr.bp_addr; info->len = bp->attr.bp_len; if (info->len > (HW_BREAKPOINT_LEN - (info->address & HW_BREAKPOINT_ALIGN))) return -EINVAL; return 0; }
/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint. Eventually we enable it in the debug control register.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
    struct arch_hw_breakpoint *info = counter_arch_bp(bp);
    unsigned long *dr7;
    int i;

    /* Claim the first free per-cpu debug register slot. */
    for (i = 0; i < HBP_NUM; i++) {
        struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

        if (!*slot) {
            *slot = bp;
            break;
        }
    }

    if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
        return -EBUSY;

    /* Program the address register and mirror it in the per-cpu cache. */
    set_debugreg(info->address, i);
    __this_cpu_write(cpu_debugreg[i], info->address);

    /* Add this slot's enable bits to the cached DR7 image, then commit. */
    dr7 = &__get_cpu_var(cpu_dr7);
    *dr7 |= encode_dr7(i, info->len, info->type);

    set_debugreg(*dr7, 7);

    return 0;
}
/* * Enable/disable all of the breakpoints active at the specified * exception level at the register level. * This is used when single-stepping after a breakpoint exception. */ static void toggle_bp_registers(int reg, enum debug_el el, int enable) { int i, max_slots, privilege; u32 ctrl; struct perf_event **slots; switch (reg) { case AARCH64_DBG_REG_BCR: slots = this_cpu_ptr(bp_on_reg); max_slots = core_num_brps; break; case AARCH64_DBG_REG_WCR: slots = this_cpu_ptr(wp_on_reg); max_slots = core_num_wrps; break; default: return; } for (i = 0; i < max_slots; ++i) { if (!slots[i]) continue; privilege = counter_arch_bp(slots[i])->ctrl.privilege; if (debug_exception_level(privilege) != el) continue; ctrl = read_wb_reg(reg, i); if (enable) ctrl |= 0x1; else ctrl &= ~0x1; write_wb_reg(reg, i, ctrl); } }
/*
 * Handle single-step exceptions following a DABR hit.
 */
int __kprobes single_step_dabr_instruction(struct die_args *args)
{
    struct pt_regs *regs = args->regs;
    struct perf_event *bp = NULL;
    struct arch_hw_breakpoint *info;

    bp = current->thread.last_hit_ubp;
    /*
     * Check if we are single-stepping as a result of a
     * previous HW Breakpoint exception
     */
    if (!bp)
        return NOTIFY_DONE;

    info = counter_arch_bp(bp);

    /*
     * We shall invoke the user-defined callback function in the single
     * stepping handler to confirm to 'trigger-after-execute' semantics
     */
    if (!info->extraneous_interrupt)
        perf_bp_event(bp, regs);

    /* Re-arm the DABR that was cleared when the breakpoint first fired. */
    set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
    current->thread.last_hit_ubp = NULL;

    /*
     * If the process was being single-stepped by ptrace, let the
     * other single-step actions occur (e.g. generate SIGTRAP).
     */
    if (test_thread_flag(TIF_SINGLESTEP))
        return NOTIFY_DONE;

    return NOTIFY_STOP;
}
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
                                struct perf_sample_data *data,
                                struct pt_regs *regs)
{
    struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
    /* Designated initializer zero-fills the remaining siginfo fields. */
    siginfo_t info = {
        .si_signo = SIGTRAP,
        .si_errno = 0,
        .si_code = TRAP_HWBKPT,
        .si_addr = (void __user *)(bkpt->trigger),
    };

#ifdef CONFIG_COMPAT
    int i;

    if (!is_compat_task())
        goto send_sig;

    /*
     * For compat tasks, report which slot fired in si_errno:
     * positive for breakpoints, negative for watchpoints.
     */
    for (i = 0; i < ARM_MAX_BRP; ++i) {
        if (current->thread.debug.hbp_break[i] == bp) {
            info.si_errno = (i << 1) + 1;
            break;
        }
    }

    for (i = 0; i < ARM_MAX_WRP; ++i) {
        if (current->thread.debug.hbp_watch[i] == bp) {
            info.si_errno = -((i << 1) + 1);
            break;
        }
    }

send_sig:
#endif
    force_sig_info(SIGTRAP, &info, current);
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
    int i;
    struct thread_struct *t = &tsk->thread;

    /* Tear down and clear every registered breakpoint slot... */
    for (i = 0; i < ARM_MAX_BRP; i++) {
        if (t->debug.hbp_break[i]) {
            unregister_hw_breakpoint(t->debug.hbp_break[i]);
            t->debug.hbp_break[i] = NULL;
        }
    }

    /* ...and every registered watchpoint slot. */
    for (i = 0; i < ARM_MAX_WRP; i++) {
        if (t->debug.hbp_watch[i]) {
            unregister_hw_breakpoint(t->debug.hbp_watch[i]);
            t->debug.hbp_watch[i] = NULL;
        }
    }
}
/*
 * Program instruction-breakpoint register 'reg' from the perf event:
 * write the target address, then set the matching IBREAKENABLE bit.
 */
static void set_ibreak_regs(int reg, struct perf_event *bp)
{
    struct arch_hw_breakpoint *info = counter_arch_bp(bp);
    unsigned long enable_bits;

    xtensa_wsr(info->address, SREG_IBREAKA + reg);

    enable_bits = xtensa_get_sr(SREG_IBREAKENABLE) | (1 << reg);
    xtensa_set_sr(enable_bits, SREG_IBREAKENABLE);
}
/*
 * Handle a DABR (data address breakpoint) exception.
 */
int __kprobes hw_breakpoint_handler(struct die_args *args)
{
    int rc = NOTIFY_STOP;
    struct perf_event *bp;
    struct pt_regs *regs = args->regs;
    int stepped = 1;
    struct arch_hw_breakpoint *info;
    unsigned int instr;
    unsigned long dar = regs->dar;

    /* Disable the breakpoint while we handle the exception. */
    set_dabr(0);

    rcu_read_lock();

    bp = __get_cpu_var(bp_per_reg);
    if (!bp)
        goto out;
    info = counter_arch_bp(bp);

    /*
     * ptrace-created events get their callback invoked and the DABR is
     * left disabled; ptrace re-arms it itself.
     */
    if (bp->overflow_handler == ptrace_triggered) {
        perf_bp_event(bp, regs);
        rc = NOTIFY_DONE;
        goto out;
    }

    /*
     * Extraneous interrupt: the faulting address (DAR) lies outside
     * the [bp_addr, bp_addr + bp_len) window of this breakpoint.
     */
    info->extraneous_interrupt = !((bp->attr.bp_addr <= dar) &&
            (dar - bp->attr.bp_addr < bp->attr.bp_len));

    /*
     * User breakpoints: arrange single-stepping via MSR_SE; the DABR is
     * restored later in the single-step handler.
     */
    if (user_mode(regs)) {
        bp->ctx->task->thread.last_hit_ubp = bp;
        regs->msr |= MSR_SE;
        goto out;
    }

    /* Kernel breakpoints: emulate the instruction under the breakpoint. */
    stepped = 0;
    instr = 0;
    if (!__get_user_inatomic(instr, (unsigned int *) regs->nip))
        stepped = emulate_step(regs, instr);

    /*
     * If emulation failed, leave the breakpoint disabled rather than
     * re-taking the exception forever.
     */
    if (!stepped) {
        WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
            "0x%lx will be disabled.", info->address);
        perf_event_disable(bp);
        goto out;
    }

    if (!info->extraneous_interrupt)
        perf_bp_event(bp, regs);

    /* Re-arm the DABR now that the instruction has been stepped over. */
    set_dabr(info->address | info->type | DABR_TRANSLATION);

out:
    rcu_read_unlock();
    return rc;
}
/* * Validate the arch-specific HW Breakpoint register settings */ int arch_validate_hwbkpt_settings(struct perf_event *bp, struct task_struct *tsk) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); unsigned int align; int ret; ret = arch_build_bp_info(bp); if (ret) return ret; ret = -EINVAL; switch (info->len) { case SH_BREAKPOINT_LEN_1: align = 0; break; case SH_BREAKPOINT_LEN_2: align = 1; break; case SH_BREAKPOINT_LEN_4: align = 3; break; case SH_BREAKPOINT_LEN_8: align = 7; break; default: return ret; } /* * For kernel-addresses, either the address or symbol name can be * specified. */ if (info->name) info->address = (unsigned long)kallsyms_lookup_name(info->name); /* * Check that the low-order bits of the address are appropriate * for the alignment implied by len. */ if (info->address & align) return -EINVAL; /* Check that the virtual address is in the proper range */ if (tsk) { if (!arch_check_va_in_userspace(info->address, info->len)) return -EFAULT; } else { if (!arch_check_va_in_kernelspace(info->address, info->len)) return -EFAULT; } return 0; }
/*
 * Report whether the breakpoint's whole range lies in kernel space,
 * i.e. both its first and last byte are at or above TASK_SIZE.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
    struct arch_hw_breakpoint *info = counter_arch_bp(bp);
    unsigned long start = info->address;
    unsigned long last = start + get_hbp_len(info->len) - 1;

    return (start >= TASK_SIZE) && (last >= TASK_SIZE);
}
/*
 * Install a perf counter breakpoint into the single per-cpu DABR slot.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
    struct arch_hw_breakpoint *info = counter_arch_bp(bp);
    struct perf_event **slot = &__get_cpu_var(bp_per_reg);

    *slot = bp;

    /*
     * Do not write DABR while this breakpoint's hit is still being
     * single-stepped; single_step_dabr_instruction() re-arms it then.
     */
    if (current->thread.last_hit_ubp != bp)
        set_dabr(info->address | info->type | DABR_TRANSLATION);

    return 0;
}
/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
    struct arch_hw_breakpoint *info;

    /* Nothing to restore unless a user breakpoint is mid single-step. */
    if (likely(!tsk->thread.last_hit_ubp))
        return;

    info = counter_arch_bp(tsk->thread.last_hit_ubp);

    /* Cancel the pending single-step and re-arm the DABR now. */
    regs->msr &= ~MSR_SE;
    set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
    tsk->thread.last_hit_ubp = NULL;
}
/*
 * Fetch the encoded control register for breakpoint slot 'idx' of 'tsk'.
 * Writes 0 to *ctrl when the slot has no event; propagates lookup errors.
 */
static int ptrace_hbp_get_ctrl(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u32 *ctrl)
{
    struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

    if (IS_ERR(bp))
        return PTR_ERR(bp);

    if (bp)
        *ctrl = encode_ctrl_reg(counter_arch_bp(bp)->ctrl);
    else
        *ctrl = 0;

    return 0;
}
static void set_dbreak_regs(int reg, struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); unsigned long dbreakc = DBREAKC_MASK_MASK & -info->len; if (info->type & XTENSA_BREAKPOINT_LOAD) dbreakc |= DBREAKC_LOAD_MASK; if (info->type & XTENSA_BREAKPOINT_STORE) dbreakc |= DBREAKC_STOR_MASK; xtensa_wsr(info->address, SREG_DBREAKA + reg); xtensa_wsr(dbreakc, SREG_DBREAKC + reg); }
/*
 * Translate the generic perf attr into the x86 arch_hw_breakpoint info.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
    struct arch_hw_breakpoint *info = counter_arch_bp(bp);

    info->address = bp->attr.bp_addr;

    /* Type */
    switch (bp->attr.bp_type) {
    case HW_BREAKPOINT_W:
        info->type = X86_BREAKPOINT_WRITE;
        break;
    case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
        info->type = X86_BREAKPOINT_RW;
        break;
    case HW_BREAKPOINT_X:
        info->type = X86_BREAKPOINT_EXECUTE;
        /*
         * x86 inst breakpoints need to have a specific undefined len.
         * But we still need to check userspace is not trying to setup
         * an unsupported length, to get a range breakpoint for example.
         */
        if (bp->attr.bp_len == sizeof(long)) {
            info->len = X86_BREAKPOINT_LEN_X;
            return 0;
        }
        /* fall through: any other execute length is invalid */
    default:
        return -EINVAL;
    }

    /* Len */
    switch (bp->attr.bp_len) {
    case HW_BREAKPOINT_LEN_1:
        info->len = X86_BREAKPOINT_LEN_1;
        break;
    case HW_BREAKPOINT_LEN_2:
        info->len = X86_BREAKPOINT_LEN_2;
        break;
    case HW_BREAKPOINT_LEN_4:
        info->len = X86_BREAKPOINT_LEN_4;
        break;
#ifdef CONFIG_X86_64
    case HW_BREAKPOINT_LEN_8:
        info->len = X86_BREAKPOINT_LEN_8;
        break;
#endif
    default:
        return -EINVAL;
    }

    return 0;
}
/*
 * Translate the generic perf attr into the x86 arch_hw_breakpoint info.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
    struct arch_hw_breakpoint *info = counter_arch_bp(bp);

    info->address = bp->attr.bp_addr;

    /* Type */
    switch (bp->attr.bp_type) {
    case HW_BREAKPOINT_W:
        info->type = X86_BREAKPOINT_WRITE;
        break;
    case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
        info->type = X86_BREAKPOINT_RW;
        break;
    case HW_BREAKPOINT_X:
        info->type = X86_BREAKPOINT_EXECUTE;
        /*
         * Execute breakpoints take the special LEN_X encoding only when
         * the requested length is sizeof(long); anything else falls
         * through to the default and is rejected.
         */
        if (bp->attr.bp_len == sizeof(long)) {
            info->len = X86_BREAKPOINT_LEN_X;
            return 0;
        }
    default:
        return -EINVAL;
    }

    /* Len */
    switch (bp->attr.bp_len) {
    case HW_BREAKPOINT_LEN_1:
        info->len = X86_BREAKPOINT_LEN_1;
        break;
    case HW_BREAKPOINT_LEN_2:
        info->len = X86_BREAKPOINT_LEN_2;
        break;
    case HW_BREAKPOINT_LEN_4:
        info->len = X86_BREAKPOINT_LEN_4;
        break;
#ifdef CONFIG_X86_64
    case HW_BREAKPOINT_LEN_8:
        info->len = X86_BREAKPOINT_LEN_8;
        break;
#endif
    default:
        return -EINVAL;
    }

    return 0;
}
/* * Install a perf counter breakpoint. */ int arch_install_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); struct perf_event **slot, **slots; struct debug_info *debug_info = ¤t->thread.debug; int i, max_slots, ctrl_reg, val_reg, reg_enable; u32 ctrl; if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { /* Breakpoint */ ctrl_reg = AARCH64_DBG_REG_BCR; val_reg = AARCH64_DBG_REG_BVR; slots = __get_cpu_var(bp_on_reg); max_slots = core_num_brps; reg_enable = !debug_info->bps_disabled; } else { /* Watchpoint */ ctrl_reg = AARCH64_DBG_REG_WCR; val_reg = AARCH64_DBG_REG_WVR; slots = __get_cpu_var(wp_on_reg); max_slots = core_num_wrps; reg_enable = !debug_info->wps_disabled; } for (i = 0; i < max_slots; ++i) { slot = &slots[i]; if (!*slot) { *slot = bp; break; } } if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) return -ENOSPC; /* Ensure debug monitors are enabled at the correct exception level. */ enable_debug_monitors(debug_exception_level(info->ctrl.privilege)); /* Setup the address register. */ write_wb_reg(val_reg, i, info->address); /* Setup the control register. */ ctrl = encode_ctrl_reg(info->ctrl); write_wb_reg(ctrl_reg, i, reg_enable ? ctrl | 0x1 : ctrl & ~0x1); return 0; }
/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
    struct arch_hw_breakpoint *info = counter_arch_bp(bp);
    struct perf_event **slot = &__get_cpu_var(bp_per_reg);

    *slot = bp;

    /*
     * Do not install DABR values if the instruction must be single-stepped.
     * If so, DABR will be populated in single_step_dabr_instruction().
     */
    if (current->thread.last_hit_ubp != bp)
        set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);

    return 0;
}
/*
 * Read one hardware-breakpoint pseudo-register for ptrace and copy it
 * to userspace. num == 0 returns the resource-info word; otherwise odd
 * register numbers return the address and even ones the control word.
 */
static int ptrace_gethbpregs(struct task_struct *tsk, long num,
                             unsigned long __user *data)
{
    u32 reg;
    int idx, ret = 0;
    struct perf_event *bp;
    struct arch_hw_breakpoint_ctrl arch_ctrl;

    if (num == 0) {
        reg = ptrace_get_hbp_resource_info();
    } else {
        idx = ptrace_hbp_num_to_idx(num);
        if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
            ret = -EINVAL;
            goto out;
        }

        /* An empty slot reads back as zero. */
        bp = tsk->thread.debug.hbp[idx];
        if (!bp) {
            reg = 0;
            goto put;
        }

        arch_ctrl = counter_arch_bp(bp)->ctrl;

        /*
         * Fix up the len because we may have adjusted it
         * to compensate for an unaligned address.
         */
        while (!(arch_ctrl.len & 0x1))
            arch_ctrl.len >>= 1;

        if (num & 0x1)
            reg = bp->attr.bp_addr;
        else
            reg = encode_ctrl_reg(arch_ctrl);
    }

put:
    if (put_user(reg, data))
        ret = -EFAULT;
out:
    return ret;
}
/* * Validate the arch-specific HW Breakpoint register settings */ int arch_validate_hwbkpt_settings(struct perf_event *bp) { int ret = -EINVAL; struct arch_hw_breakpoint *info = counter_arch_bp(bp); if (!bp) return ret; switch (bp->attr.bp_type) { case HW_BREAKPOINT_R: info->type = DABR_DATA_READ; break; case HW_BREAKPOINT_W: info->type = DABR_DATA_WRITE; break; case HW_BREAKPOINT_R | HW_BREAKPOINT_W: info->type = (DABR_DATA_READ | DABR_DATA_WRITE); break; default: return ret; } info->address = bp->attr.bp_addr; info->len = bp->attr.bp_len; info->dabrx = DABRX_ALL; if (bp->attr.exclude_user) info->dabrx &= ~DABRX_USER; if (bp->attr.exclude_kernel) info->dabrx &= ~DABRX_KERNEL; if (bp->attr.exclude_hv) info->dabrx &= ~DABRX_HYP; /* * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8) * and breakpoint addresses are aligned to nearest double-word * HW_BREAKPOINT_ALIGN by rounding off to the lower address, the * 'symbolsize' should satisfy the check below. */ if (info->len > (HW_BREAKPOINT_LEN - (info->address & HW_BREAKPOINT_ALIGN))) return -EINVAL; return 0; }
/*
 * Remove the breakpoint from its per-cpu slot, disable it in the UBC
 * and release the UBC clock.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
    struct arch_hw_breakpoint *info = counter_arch_bp(bp);
    int slot_idx;

    /* Locate the slot that holds this event and vacate it. */
    for (slot_idx = 0; slot_idx < sh_ubc->num_events; slot_idx++) {
        struct perf_event **slot = &__get_cpu_var(bp_per_reg[slot_idx]);

        if (*slot == bp) {
            *slot = NULL;
            break;
        }
    }

    if (WARN_ONCE(slot_idx == sh_ubc->num_events,
                  "Can't find any breakpoint slot"))
        return;

    sh_ubc->disable(info, slot_idx);
    clk_disable(sh_ubc->clk);
}
/*
 * Install a perf counter breakpoint: allocate a free slot in the
 * matching bank (ibreak for execute, dbreak otherwise) and program it.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
    int slot;
    int is_exec = counter_arch_bp(bp)->type == XTENSA_BREAKPOINT_EXECUTE;

    if (is_exec) {
        /* Breakpoint */
        slot = alloc_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
        if (slot < 0)
            return slot;
        set_ibreak_regs(slot, bp);
    } else {
        /* Watchpoint */
        slot = alloc_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
        if (slot < 0)
            return slot;
        set_dbreak_regs(slot, bp);
    }
    return 0;
}
/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
    struct arch_hw_breakpoint *info = counter_arch_bp(bp);
    unsigned int align;
    int ret;

    ret = arch_build_bp_info(bp);
    if (ret)
        return ret;

    ret = -EINVAL;

    /* Map the hardware length encoding to an alignment mask. */
    switch (info->len) {
    case X86_BREAKPOINT_LEN_1:
        align = 0;
        break;
    case X86_BREAKPOINT_LEN_2:
        align = 1;
        break;
    case X86_BREAKPOINT_LEN_4:
        align = 3;
        break;
#ifdef CONFIG_X86_64
    case X86_BREAKPOINT_LEN_8:
        align = 7;
        break;
#endif
    default:
        return ret;
    }

    /*
     * Check that the low-order bits of the address are appropriate
     * for the alignment implied by len.
     */
    if (info->address & align)
        return -EINVAL;

    return 0;
}
static int arch_build_bp_info(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); info->address = bp->attr.bp_addr; switch (bp->attr.bp_len) { case HW_BREAKPOINT_LEN_1: info->len = SH_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: info->len = SH_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_4: info->len = SH_BREAKPOINT_LEN_4; break; case HW_BREAKPOINT_LEN_8: info->len = SH_BREAKPOINT_LEN_8; break; default: return -EINVAL; } switch (bp->attr.bp_type) { case HW_BREAKPOINT_R: info->type = SH_BREAKPOINT_READ; break; case HW_BREAKPOINT_W: info->type = SH_BREAKPOINT_WRITE; break; case HW_BREAKPOINT_W | HW_BREAKPOINT_R: info->type = SH_BREAKPOINT_RW; break; default: return -EINVAL; } return 0; }
/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
    struct arch_hw_breakpoint *info = counter_arch_bp(bp);
    unsigned int align_mask;
    int err;

    err = arch_build_bp_info(bp);
    if (err)
        return err;

    /* Derive the alignment mask implied by the hardware length code. */
    if (info->len == X86_BREAKPOINT_LEN_1)
        align_mask = 0;
    else if (info->len == X86_BREAKPOINT_LEN_2)
        align_mask = 1;
    else if (info->len == X86_BREAKPOINT_LEN_4)
        align_mask = 3;
#ifdef CONFIG_X86_64
    else if (info->len == X86_BREAKPOINT_LEN_8)
        align_mask = 7;
#endif
    else
        return -EINVAL;

    /* The address must be aligned to the length implied by 'len'. */
    if (info->address & align_mask)
        return -EINVAL;

    return 0;
}
/* * Handle hitting a HW-breakpoint. */ static void ptrace_hbptriggered(struct perf_event *bp, int unused, struct perf_sample_data *data, struct pt_regs *regs) { struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); long num; int i; siginfo_t info; for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i) if (current->thread.debug.hbp[i] == bp) break; num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i); info.si_signo = SIGTRAP; info.si_errno = (int)num; info.si_code = TRAP_HWBKPT; info.si_addr = (void __user *)(bkpt->trigger); force_sig_info(SIGTRAP, &info, current); }
/*
 * Claim a free per-cpu UBC slot for this event, then power and enable
 * the channel.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
    struct arch_hw_breakpoint *info = counter_arch_bp(bp);
    int idx;

    for (idx = 0; idx < sh_ubc->num_events; idx++) {
        struct perf_event **slot = &__get_cpu_var(bp_per_reg[idx]);

        if (!*slot) {
            *slot = bp;
            break;
        }
    }

    if (WARN_ONCE(idx == sh_ubc->num_events,
                  "Can't find any breakpoint slot"))
        return -EBUSY;

    clk_enable(sh_ubc->clk);
    sh_ubc->enable(info, idx);

    return 0;
}
void arch_uninstall_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); int i; if (info->type == XTENSA_BREAKPOINT_EXECUTE) { unsigned long ibreakenable; /* Breakpoint */ i = free_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp); if (i >= 0) { ibreakenable = xtensa_get_sr(SREG_IBREAKENABLE); xtensa_set_sr(ibreakenable & ~(1 << i), SREG_IBREAKENABLE); } } else { /* Watchpoint */ i = free_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp); if (i >= 0) xtensa_wsr(0, SREG_DBREAKC + i); } }
/*
 * Handle single-step exceptions following a DABR hit.
 */
int __kprobes single_step_dabr_instruction(struct die_args *args)
{
    struct pt_regs *regs = args->regs;
    struct perf_event *bp = NULL;
    struct arch_hw_breakpoint *bp_info;

    /* Only act if we are single-stepping a previous DABR hit. */
    bp = current->thread.last_hit_ubp;
    if (!bp)
        return NOTIFY_DONE;

    bp_info = counter_arch_bp(bp);

    /*
     * Invoke the callback here, after the instruction has executed,
     * to honour 'trigger-after-execute' semantics.
     */
    if (!bp_info->extraneous_interrupt)
        perf_bp_event(bp, regs);

    /* Re-arm the DABR that was cleared when the breakpoint first fired. */
    set_dabr(bp_info->address | bp_info->type | DABR_TRANSLATION);
    current->thread.last_hit_ubp = NULL;

    /*
     * If ptrace is single-stepping this task, let the other single-step
     * actions (e.g. SIGTRAP generation) still occur.
     */
    if (test_thread_flag(TIF_SINGLESTEP))
        return NOTIFY_DONE;

    return NOTIFY_STOP;
}
/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
    struct arch_hw_breakpoint *info = counter_arch_bp(bp);
    unsigned long *dr7;
    int i;

    /* Find the per-cpu slot holding this event and vacate it. */
    for (i = 0; i < HBP_NUM; i++) {
        struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

        if (*slot == bp) {
            *slot = NULL;
            break;
        }
    }

    if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
        return;

    /*
     * Drop this slot's bits from the cached DR7 image, then commit.
     * NOTE(review): install uses encode_dr7 but this path uses
     * __encode_dr7 — presumably the variant without global-enable
     * bits so other slots stay untouched; confirm against the header.
     */
    dr7 = &__get_cpu_var(cpu_dr7);
    *dr7 &= ~__encode_dr7(i, info->len, info->type);

    set_debugreg(*dr7, 7);
}