static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	void *addr = (void *)regs->pc;
	int ret = 0;

	pr_debug("kprobe_handler: kprobe_running=%p\n", kprobe_running());

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();

	/* Check that we're not recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kprobe_status == KPROBE_HIT_SS) {
				printk("FIXME: kprobe hit while single-stepping!\n");
				goto no_kprobe;
			}

			printk("FIXME: kprobe hit while handling another kprobe\n");
			goto no_kprobe;
		} else {
			p = kprobe_running();
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		/* If it's not ours, can't be delete race, (we hold lock). */
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p)
		goto no_kprobe;

	kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
/* Ftrace callback handler for kprobes */
void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;
	unsigned long flags;

	/* Disable irq for emulating a breakpoint and avoiding preempt */
	local_irq_save(flags);

	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto end;

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
		regs->ip = ip + sizeof(kprobe_opcode_t);

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs))
			__skip_singlestep(p, regs, kcb);
		/*
		 * If pre_handler returns !0, it sets regs->ip and
		 * resets current kprobe.
		 */
	}
end:
	local_irq_restore(flags);
}
static unsigned long __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;

	kp = get_kprobe((void *)addr);
	/* There is no probe, return original address */
	if (!kp)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has an original instruction.
	 * However, RIP-relative instruction can not do single-stepping
	 * at different place, __copy_instruction() tweaks the displacement of
	 * that instruction. In that case, we can't recover the instruction
	 * from the kp->ainsn.insn.
	 *
	 * On the other hand, kp->opcode has a copy of the first byte of
	 * the probed instruction, which is overwritten by int3. And
	 * the instruction at kp->addr is not modified by kprobes except
	 * for the first byte, we can recover the original instruction
	 * from it and kp->opcode.
	 */
	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	buf[0] = kp->opcode;
	return (unsigned long)buf;
}
int register_kprobe(struct kprobe *p)
{
	int ret = 0;
	unsigned long flags = 0;

	if ((ret = arch_prepare_kprobe(p)) != 0) {
		goto rm_kprobe;
	}
	spin_lock_irqsave(&kprobe_lock, flags);
	INIT_HLIST_NODE(&p->hlist);
	if (get_kprobe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	arch_copy_kprobe(p);

	hlist_add_head(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	p->opcode = *p->addr;
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
out:
	spin_unlock_irqrestore(&kprobe_lock, flags);
rm_kprobe:
	if (ret == -EEXIST)
		arch_remove_kprobe(p);
	return ret;
}
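/*
 * Usage sketch (not taken from the kernel sources above): a minimal module
 * that registers a probe through the kprobes API. The probed symbol name and
 * the handler body are illustrative assumptions; struct kprobe, pre_handler,
 * register_kprobe() and unregister_kprobe() are the standard interface. Note
 * that .symbol_name assumes a kernel where kprobes resolves the symbol itself,
 * unlike the older register_kprobe() variant above, which expects a
 * pre-filled ->addr.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Called just before the probed instruction is executed. */
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;	/* 0: let kprobes single-step the original instruction */
}

static struct kprobe example_kp = {
	.symbol_name = "kernel_clone",	/* assumed probe target, for illustration */
	.pre_handler = example_pre_handler,
};

static int __init example_init(void)
{
	/* Returns 0 on success, or e.g. -EEXIST if the address is already probed. */
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");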
/**
 * If an illegal slot instruction exception occurs for an address
 * containing a kprobe, remove the probe.
 *
 * Returns 0 if the exception was handled successfully, 1 otherwise.
 */
int __kprobes kprobe_handle_illslot(unsigned long pc)
{
	struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1);

	if (p != NULL) {
		printk("Warning: removing kprobe from delay slot: 0x%.8x\n",
		       (unsigned int)pc + 2);
		unregister_kprobe(p);
		return 0;
	}

	return 1;
}
static unsigned long __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	unsigned long faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr);
	/*
	 * Addresses inside the ftrace location are refused by
	 * arch_check_ftrace_location(). Something went terribly wrong
	 * if such an address is checked here.
	 */
	if (WARN_ON(faddr && faddr != addr))
		return 0UL;
	/*
	 * Use the current code if it is not modified by Kprobe
	 * and it cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has an original instruction.
	 * However, RIP-relative instruction can not do single-stepping
	 * at different place, __copy_instruction() tweaks the displacement of
	 * that instruction. In that case, we can't recover the instruction
	 * from the kp->ainsn.insn.
	 *
	 * On the other hand, in case of a normal Kprobe, kp->opcode has a copy
	 * of the first byte of the probed instruction, which is overwritten
	 * by int3. And the instruction at kp->addr is not modified by kprobes
	 * except for the first byte, we can recover the original instruction
	 * from it and kp->opcode.
	 *
	 * In case of Kprobes using ftrace, we do not have a copy of
	 * the original instruction. In fact, the ftrace location might
	 * be modified at anytime and even could be in an inconsistent state.
	 * Fortunately, we know that the original code is the ideal 5-byte
	 * long NOP.
	 */
	if (probe_kernel_read(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (faddr)
		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobe */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op->list is not empty, op is under optimizing */
			if (list_empty(&op->list))
				goto found;
		}
	}
/* Ftrace callback handler for kprobes */
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
			   struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	preempt_disable();

	p = get_kprobe((kprobe_opcode_t *)nip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto end;

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		unsigned long orig_nip = regs->nip;

		/*
		 * On powerpc, NIP is *before* this instruction for the
		 * pre handler
		 */
		regs->nip -= MCOUNT_INSN_SIZE;

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs))
			__skip_singlestep(p, regs, kcb, orig_nip);
		else {
			/*
			 * If pre_handler returns !0, it sets regs->nip and
			 * resets current kprobe. In this case, we should not
			 * re-enable preemption.
			 */
			return;
		}
	}
end:
	preempt_enable_no_resched();
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	void *addr = (void *) regs->tpc;
	int ret = 0;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS) {
				regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
					kcb->kprobe_orig_tstate_pil);
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			kcb->kprobe_status = KPROBE_REENTER;
			prepare_singlestep(p, regs, kcb);
			return 1;
		} else {
			if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	if (p->pre_handler && p->pre_handler(p, regs))
		return 1;

ss_probe:
	prepare_singlestep(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
/*
 * Called with IRQs disabled. IRQs must remain disabled from that point
 * all the way until processing this kprobe is complete. The current
 * kprobes implementation cannot process more than one nested level of
 * kprobe, and that level is reserved for user kprobe handlers, so we can't
 * risk encountering a new kprobe in an interrupt handler.
 */
void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur;
	struct kprobe_ctlblk *kcb;

	kcb = get_kprobe_ctlblk();
	cur = kprobe_running();

#ifdef CONFIG_THUMB2_KERNEL
	/*
	 * First look for a probe which was registered using an address with
	 * bit 0 set, this is the usual situation for pointers to Thumb code.
	 * If not found, fallback to looking for one with bit 0 clear.
	 */
	p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
	if (!p)
		p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);

#else /* ! CONFIG_THUMB2_KERNEL */
	p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
#endif

	if (p) {
		if (cur) {
			/* Kprobe is pending, so we're recursing. */
			switch (kcb->kprobe_status) {
			case KPROBE_HIT_ACTIVE:
			case KPROBE_HIT_SSDONE:
				/* A pre- or post-handler probe got us here. */
				kprobes_inc_nmissed_count(p);
				save_previous_kprobe(kcb);
				set_current_kprobe(p);
				kcb->kprobe_status = KPROBE_REENTER;
				singlestep(p, regs, kcb);
				restore_previous_kprobe(kcb);
				break;
			default:
				/* impossible cases */
				BUG();
			}
		} else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
			/* Probe hit and conditional execution check ok. */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry,
			 * so get out doing nothing more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				kcb->kprobe_status = KPROBE_HIT_SS;
				singlestep(p, regs, kcb);
				if (p->post_handler) {
					kcb->kprobe_status = KPROBE_HIT_SSDONE;
					p->post_handler(p, regs, 0);
				}
				reset_current_kprobe();
			}
		} else {
			/*
			 * Probe hit but conditional execution check failed,
			 * so just skip the instruction and continue as if
			 * nothing had happened.
			 */
			singlestep_skip(p, regs);
		}
	} else if (cur) {
		/* We probably hit a jprobe. Call its break handler. */
		if (cur->break_handler && cur->break_handler(cur, regs)) {
			kcb->kprobe_status = KPROBE_HIT_SS;
			singlestep(cur, regs, kcb);
			if (cur->post_handler) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				cur->post_handler(cur, regs, 0);
			}
		}
		reset_current_kprobe();
	} else {
		/*
		 * The probe was removed and a race is in progress.
		 * There is nothing we can do about it. Let's restart
		 * the instruction. By the time we can restart, the
		 * real instruction will be there.
		 */
	}
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	void *addr = (void *) regs->tpc;
	int ret = 0;

	preempt_disable();

	if (kprobe_running()) {
		/* We *are* holding lock here, so this is safe.
		 * Disarm the probe we just hit, and ignore it.
		 */
		p = get_kprobe(addr);
		if (p) {
			if (kprobe_status == KPROBE_HIT_SS) {
				regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
					current_kprobe_orig_tstate_pil);
				unlock_kprobes();
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe();
			set_current_kprobe(p, regs);
			p->nmissed++;
			kprobe_status = KPROBE_REENTER;
			prepare_singlestep(p, regs);
			return 1;
		} else {
			p = current_kprobe;
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		/* If it's not ours, can't be delete race, (we hold lock). */
		goto no_kprobe;
	}

	lock_kprobes();
	p = get_kprobe(addr);
	if (!p) {
		unlock_kprobes();
		if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs);
	kprobe_status = KPROBE_HIT_ACTIVE;
	if (p->pre_handler && p->pre_handler(p, regs))
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
/*
 * Called with IRQs disabled. IRQs must remain disabled from that point
 * all the way until processing this kprobe is complete. The current
 * kprobes implementation cannot process more than one nested level of
 * kprobe, and that level is reserved for user kprobe handlers, so we can't
 * risk encountering a new kprobe in an interrupt handler.
 */
void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur;
	struct kprobe_ctlblk *kcb;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->ARM_pc;

	kcb = get_kprobe_ctlblk();
	cur = kprobe_running();
	p = get_kprobe(addr);

	if (p) {
		if (cur) {
			/* Kprobe is pending, so we're recursing. */
			switch (kcb->kprobe_status) {
			case KPROBE_HIT_ACTIVE:
			case KPROBE_HIT_SSDONE:
				/* A pre- or post-handler probe got us here. */
				kprobes_inc_nmissed_count(p);
				save_previous_kprobe(kcb);
				set_current_kprobe(p);
				kcb->kprobe_status = KPROBE_REENTER;
				singlestep(p, regs, kcb);
				restore_previous_kprobe(kcb);
				break;
			default:
				/* impossible cases */
				BUG();
			}
		} else {
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry,
			 * so get out doing nothing more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				kcb->kprobe_status = KPROBE_HIT_SS;
				singlestep(p, regs, kcb);
				if (p->post_handler) {
					kcb->kprobe_status = KPROBE_HIT_SSDONE;
					p->post_handler(p, regs, 0);
				}
				reset_current_kprobe();
			}
		}
	} else if (cur) {
		/* We probably hit a jprobe. Call its break handler. */
		if (cur->break_handler && cur->break_handler(cur, regs)) {
			kcb->kprobe_status = KPROBE_HIT_SS;
			singlestep(cur, regs, kcb);
			if (cur->post_handler) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				cur->post_handler(cur, regs, 0);
			}
		}
		reset_current_kprobe();
	} else {
		/*
		 * The probe was removed and a race is in progress.
		 * There is nothing we can do about it. Let's restart
		 * the instruction. By the time we can restart, the
		 * real instruction will be there.
		 */
	}
}
static inline int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		/* We *are* holding lock here, so this is safe.
		   Disarm the probe we just hit, and ignore it. */
		p = get_kprobe(addr);
		if (p) {
			if (kprobe_status == KPROBE_HIT_SS) {
				regs->msr &= ~MSR_SE;
				regs->msr |= kprobe_saved_msr;
				unlock_kprobes();
				goto no_kprobe;
			}
			disarm_kprobe(p, regs);
			ret = 1;
		} else {
			p = current_kprobe;
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		/* If it's not ours, can't be delete race, (we hold lock). */
		goto no_kprobe;
	}

	lock_kprobes();
	p = get_kprobe(addr);
	if (!p) {
		unlock_kprobes();
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (IS_TW(cur_insn) || IS_TD(cur_insn) ||
			    IS_TWI(cur_insn) || IS_TDI(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kprobe_status = KPROBE_HIT_ACTIVE;
	current_kprobe = p;
	kprobe_saved_msr = regs->msr;
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	/*
	 * This preempt_disable() matches the preempt_enable_no_resched()
	 * in post_kprobe_handler().
	 */
	preempt_disable();
	return 1;

no_kprobe:
	return ret;
}