/*
 * Remove emplaced single-step breakpoints, returning true if we hit one of
 * them.
 *
 * @regs: register state at the trap; regs->pc is compared against the
 *        planted breakpoint addresses to decide whether this trap was ours.
 *
 * Up to two breakpoints may have been planted (kgdb_sstep_bp_addr[0]/[1],
 * e.g. both arms of a conditional branch); each has its saved original
 * byte in kgdb_sstep_bp[0]/[1].
 */
static bool kgdb_arch_undo_singlestep(struct pt_regs *regs)
{
	bool hit = false;
	u8 *x = kgdb_sstep_bp_addr[0], *y = kgdb_sstep_bp_addr[1];
	u8 opcode;

	/* Only the thread that planted the breakpoints may restore them. */
	if (kgdb_sstep_thread == current_thread_info()) {
		if (x) {
			if (x == (u8 *)regs->pc)
				hit = true;
			/*
			 * The planted byte must still be the breakpoint
			 * opcode (0xff); anything else means the text was
			 * modified underneath us -- unrecoverable.
			 */
			if (probe_kernel_read(&opcode, x, 1) < 0 ||
			    opcode != 0xff)
				BUG();
			/* Restore the saved original byte. */
			probe_kernel_write(x, &kgdb_sstep_bp[0], 1);
			debugger_local_cache_flushinv_one(x);
		}
		if (y) {
			if (y == (u8 *)regs->pc)
				hit = true;
			if (probe_kernel_read(&opcode, y, 1) < 0 ||
			    opcode != 0xff)
				BUG();
			probe_kernel_write(y, &kgdb_sstep_bp[1], 1);
			debugger_local_cache_flushinv_one(y);
		}
	}

	/* Always clear the bookkeeping, even if another thread planted. */
	kgdb_sstep_bp_addr[0] = NULL;
	kgdb_sstep_bp_addr[1] = NULL;
	kgdb_sstep_thread = NULL;
	return hit;
}
/*
 * Decode the branch target of a module trampoline stub.
 *
 * @mod:        module owning the trampoline (supplies the TOC base)
 * @trampoline: address of the stub's first instruction
 * @target:     out parameter receiving the address the stub jumps to
 *
 * The stub begins with an addis/addi pair whose low 16-bit immediates
 * form a signed 32-bit offset from the module's TOC pointer; the actual
 * branch target is stored at that TOC entry.
 *
 * Returns 0 on success, -EFAULT if either memory read faults.
 */
int module_trampoline_target(struct module *mod, u32 *trampoline,
			     unsigned long *target)
{
	u32 buf[2];
	u16 upper, lower;
	long offset;
	void *toc_entry;

	if (probe_kernel_read(buf, trampoline, sizeof(buf)))
		return -EFAULT;

	/* Low halfword of each instruction is the immediate operand. */
	upper = buf[0] & 0xffff;
	lower = buf[1] & 0xffff;

	/*
	 * perform the addis/addi, both signed -- the casts to short
	 * sign-extend each 16-bit immediate before combining.
	 */
	offset = ((short)upper << 16) + (short)lower;

	/*
	 * Now get the address this trampoline jumps to. This
	 * is always 32 bytes into our trampoline stub.
	 */
	toc_entry = (void *)mod->arch.toc + offset + 32;

	if (probe_kernel_read(target, toc_entry, sizeof(*target)))
		return -EFAULT;

	return 0;
}
/**
 * probe_kthread_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *probe_kthread_data(struct task_struct *task)
{
	void *result = NULL;
	struct kthread *kt = to_kthread(task);

	/* Best effort: if the read faults, result simply stays NULL. */
	probe_kernel_read(&result, &kt->data, sizeof(result));
	return result;
}
/*
 * Write handler: store the user-supplied bytes as this file's string.
 *
 * @f:       open file; f->private_data holds the strings_representation
 * @address: user-space buffer to copy from
 * @size:    number of bytes to copy
 * @offset:  file offset, advanced by @size on success
 *
 * Creates a new file_string entry on first write, otherwise replaces the
 * existing buffer.  Returns the number of bytes written or a negative errno.
 */
static ssize_t mychardrv_write(struct file *f, const char __user *address,
			       size_t size, loff_t *offset)
{
	struct strings_representation *sr = f->private_data;
	ssize_t res = 0;
	struct file_string *fstr = find_file_string(f);

	if (fstr == NULL) {
		/* First write on this file: allocate and link a new entry. */
		fstr = kzalloc(sizeof(struct file_string), GFP_USER);
		if (fstr == NULL) {
			res = -ENOMEM;
			goto error;
		}
		fstr->string = kzalloc(size, GFP_USER);
		if (fstr->string == NULL) {
			kfree(fstr);
			res = -ENOMEM;
			goto error;
		}
		fstr->id = sr->current_id;
		fstr->next = sr->next;
		sr->next = fstr;
	} else {
		/* Replace the previous contents with a fresh zeroed buffer. */
		char *tmp = kzalloc(size, GFP_USER);

		if (tmp == NULL) {
			res = -ENOMEM;
			goto error;
		}
		kfree(fstr->string);
		fstr->string = tmp;
	}

	/*
	 * BUG FIX: the old code first called probe_kernel_read() on
	 * @address, but @address is a __user pointer and probe_kernel_read()
	 * is only valid for kernel addresses.  It also nullified
	 * fstr->string on failure without freeing it (memory leak, and a
	 * dangling NULL entry left on the list).  copy_from_user() below is
	 * the correct and sufficient accessor for user memory.
	 */
	res = copy_from_user(fstr->string, address, size);
	if (res == 0) {
		/*
		 * BUG FIX: the old printk passed the loff_t pointer itself
		 * (not *offset) and used %ld for a size_t -- both format
		 * mismatches.
		 */
		printk(KERN_DEBUG "copied %zu bytes from the user at offset %lld\n",
		       size, (long long)*offset);
		res = size;
		*offset += size;
	} else {
		printk(KERN_DEBUG "error: %ld bytes could not be copied from user at offset %lld\n",
		       (long)res, (long long)*offset);
		res = (ssize_t)-EFAULT;
	}
error:
	return res;
}
/*
 * bpf_probe_read() helper: copy @size bytes from the (possibly invalid)
 * kernel address @unsafe_ptr into the BPF-provided buffer @dst.
 *
 * On a faulting read the destination is zero-filled so the BPF program
 * never observes stale buffer contents; the negative error from
 * probe_kernel_read() is returned either way.
 */
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		/* Don't leak uninitialized memory to the program. */
		memset(dst, 0, size);

	return ret;
}
/*
 * Fault-tolerant variant of get_freepointer().
 *
 * With CONFIG_DEBUG_PAGEALLOC the page holding @object may be unmapped at
 * any time, so the freelist pointer at (object + s->offset) is fetched via
 * probe_kernel_read() instead of a plain dereference.  Without the config
 * option the plain get_freepointer() path is used.
 *
 * NOTE(review): if the probe faults, @p is returned uninitialized -- the
 * return value of probe_kernel_read() is ignored here; confirm callers
 * validate the pointer before use.
 */
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	void *p;

#ifdef CONFIG_DEBUG_PAGEALLOC
	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
#else
	p = get_freepointer(s, object);
#endif
	return p;
}
/*
 * Weak aliases for breakpoint management,
 * can be overriden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
{
	int ret;

	/* Preserve the original instruction bytes so they can be restored. */
	ret = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
	if (ret != 0)
		return ret;

	/* Patch in the architecture's breakpoint instruction. */
	return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
				  BREAK_INSTR_SIZE);
}
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	u32 raw;
	int err;

	err = probe_kernel_read(&raw, addr, AARCH64_INSN_SIZE);
	if (err)
		return err;

	/* Convert the in-memory little-endian encoding to CPU order. */
	*insnp = le32_to_cpu(raw);
	return 0;
}
/*
 * Weak aliases for breakpoint management,
 * can be overriden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	/* Save the original bytes first so removal can restore them. */
	int ret = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				    BREAK_INSTR_SIZE);

	if (ret)
		return ret;

	/* Replace the instruction with the arch breakpoint opcode. */
	return probe_kernel_write((char *)bpt->bpt_addr,
				  arch_kgdb_ops.gdb_bpt_instr,
				  BREAK_INSTR_SIZE);
}
/*
 * bpf_probe_read() helper (legacy calling convention): arguments arrive in
 * the five generic u64 BPF argument registers; r4 and r5 are unused.
 *
 * r1 = destination buffer, r2 = size, r3 = (possibly invalid) kernel
 * address to read from.  On a faulting read the destination is zero-filled
 * so the BPF program never sees stale memory; the negative error from
 * probe_kernel_read() is returned either way.
 */
static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	void *dst = (void *) (long) r1;
	int ret, size = (int) r2;
	void *unsafe_ptr = (void *) (long) r3;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		/* Don't leak uninitialized buffer contents to the program. */
		memset(dst, 0, size);

	return ret;
}
/*
 * Copy an instruction with recovering modified instruction by kprobes
 * and adjust the displacement if the instruction uses the %rip-relative
 * addressing mode. Note that since @real will be the final place of copied
 * instruction, displacement must be adjust by @real, not @dest.
 * This returns the length of copied instruction, or 0 if it has an error.
 *
 * @dest: buffer the instruction bytes are copied into (also decoded from)
 * @src:  original address of the probed instruction
 * @real: address where the copy will eventually execute
 * @insn: decoder state, filled in by this function
 */
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	/* Recover the original bytes if @src is already probed (int3). */
	unsigned long recovered_insn = recover_probed_instruction(buf,
							(unsigned long)src);

	if (!recovered_insn || !insn)
		return 0;

	/* This can access kernel text if given address is not recovered */
	if (probe_kernel_read(dest, (void *)recovered_insn, MAX_INSN_SIZE))
		return 0;

	kernel_insn_init(insn, dest, MAX_INSN_SIZE);
	insn_get_length(insn);

	/* Another subsystem puts a breakpoint, failed to recover */
	if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;

	/* We should not singlestep on the exception masking instructions */
	if (insn_masking_exception(insn))
		return 0;

#ifdef CONFIG_X86_64
	/* Only x86_64 has RIP relative instructions */
	if (insn_rip_relative(insn)) {
		s64 newdisp;
		u8 *disp;
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode. Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run. The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn->displacement.value
			  - (u8 *) real;
		/* The adjusted displacement must still fit in 32 bits. */
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn->length;
}
/*
 * Recover the original (un-probed) instruction bytes at @addr.
 *
 * @buf:  caller-provided buffer of MAX_INSN_SIZE opcodes used for the copy
 * @addr: address of the possibly-probed instruction
 *
 * Returns @addr itself when the code there is unmodified, the address of
 * @buf containing the reconstructed bytes otherwise, or 0 on failure.
 */
static unsigned long __recover_probed_insn(kprobe_opcode_t *buf,
					   unsigned long addr)
{
	struct kprobe *kp;
	unsigned long faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr);
	/*
	 * Addresses inside the ftrace location are refused by
	 * arch_check_ftrace_location(). Something went terribly wrong
	 * if such an address is checked here.
	 */
	if (WARN_ON(faddr && faddr != addr))
		return 0UL;
	/*
	 * Use the current code if it is not modified by Kprobe
	 * and it cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has an original instruction.
	 * However, RIP-relative instruction can not do single-stepping
	 * at different place, __copy_instruction() tweaks the displacement of
	 * that instruction. In that case, we can't recover the instruction
	 * from the kp->ainsn.insn.
	 *
	 * On the other hand, in case on normal Kprobe, kp->opcode has a copy
	 * of the first byte of the probed instruction, which is overwritten
	 * by int3. And the instruction at kp->addr is not modified by kprobes
	 * except for the first byte, we can recover the original instruction
	 * from it and kp->opcode.
	 *
	 * In case of Kprobes using ftrace, we do not have a copy of
	 * the original instruction. In fact, the ftrace location might
	 * be modified at anytime and even could be in an inconsistent state.
	 * Fortunately, we know that the original code is the ideal 5-byte
	 * long NOP.
	 */
	if (probe_kernel_read(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (faddr)
		/* ftrace site: the original code is the ideal 5-byte NOP. */
		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
	else
		/* normal kprobe: restore the int3-clobbered first byte. */
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}
static void run_plant_and_detach_test(int is_early) { char before[BREAK_INSTR_SIZE]; char after[BREAK_INSTR_SIZE]; probe_kernel_read(before, (char *)kgdbts_break_test, BREAK_INSTR_SIZE); init_simple_test(); ts.tst = plant_and_detach_test; ts.name = "plant_and_detach_test"; /* Activate test with initial breakpoint */ if (!is_early) kgdb_breakpoint(); probe_kernel_read(after, (char *)kgdbts_break_test, BREAK_INSTR_SIZE); if (memcmp(before, after, BREAK_INSTR_SIZE)) { printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n"); panic("kgdb memory corruption"); } /* complete the detach test */ if (!is_early) kgdbts_break_test(); }
/*
 * Replace the next instruction after the current instruction with a
 * breakpoint instruction.
 *
 * The target address and the displaced instruction are stashed in the
 * file-scope stepped_addr/stepped_instr so the breakpoint can be undone
 * after the step completes.
 *
 * NOTE(review): the probe_kernel_read()/probe_kernel_write() return values
 * are ignored here -- a fault would leave stepped_instr stale or the
 * breakpoint unwritten; confirm this is acceptable for this path.
 */
static void do_single_step(struct pt_regs *regs)
{
	unsigned long addr_wr;

	/* Determine where the target instruction will send us to. */
	stepped_addr = get_step_address(regs);
	/* Save the instruction we are about to displace. */
	probe_kernel_read((char *)&stepped_instr, (char *)stepped_addr,
			  BREAK_INSTR_SIZE);

	/* Write the breakpoint through the writable alias of the address. */
	addr_wr = writable_address(stepped_addr);
	probe_kernel_write((char *)addr_wr, (char *)&singlestep_insn,
			   BREAK_INSTR_SIZE);
	/* Ensure the store is visible before invalidating the icache. */
	smp_wmb();
	flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
}
/*
 * Plant a kgdb breakpoint: save the original instruction and patch in the
 * architecture's breakpoint opcode via patch_text().
 */
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int ret;

	/* patch_text() only supports int-sized breakpoints */
	BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE);

	ret = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (ret)
		return ret;

	patch_text((void *)bpt->bpt_addr,
		   *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
	return 0;
}
/*
 * Plant a kgdb breakpoint through the writable alias of the text address,
 * then flush the icache so the CPU sees the new instruction.
 */
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	unsigned long wr_addr = writable_address(bpt->bpt_addr);
	int ret;

	if (wr_addr == 0)
		return -1;

	/* Keep a copy of the displaced bytes for later removal. */
	ret = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (ret)
		return ret;

	ret = probe_kernel_write((char *)wr_addr, arch_kgdb_ops.gdb_bpt_instr,
				 BREAK_INSTR_SIZE);
	/* Order the store before the icache flush. */
	smp_wmb();
	flush_icache_range((unsigned long)bpt->bpt_addr,
			   (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
	return ret;
}
/*
 * Check whether the instructions at @p match the module trampoline stub
 * template (ppc64_stub_insns, compared under ppc64_stub_mask).
 *
 * Returns true only when all masked instruction words match.
 */
bool is_module_trampoline(u32 *p)
{
	unsigned int i;
	u32 insns[ARRAY_SIZE(ppc64_stub_insns)];

	BUILD_BUG_ON(sizeof(ppc64_stub_insns) != sizeof(ppc64_stub_mask));

	/*
	 * BUG FIX: this function returns bool, but the old code returned
	 * -EFAULT on a faulting read.  A nonzero errno converts to true,
	 * so unreadable memory was misreported as a valid trampoline.
	 * An unreadable address is simply not a trampoline: return false.
	 */
	if (probe_kernel_read(insns, p, sizeof(insns)))
		return false;

	for (i = 0; i < ARRAY_SIZE(ppc64_stub_insns); i++) {
		u32 insna = insns[i];
		u32 insnb = ppc64_stub_insns[i];
		u32 mask = ppc64_stub_mask[i];

		if ((insna & mask) != (insnb & mask))
			return false;
	}

	return true;
}
static int ftrace_modify_code(unsigned long ip, void *old_code, int old_size, void *new_code, int new_size) { unsigned char replaced[MCOUNT_INSN_SIZE]; /* * Note: Due to modules code can disappear and change. * We need to protect against faulting as well as code * changing. We do this by using the probe_kernel_* * functions. * This however is just a simple sanity check. */ if (probe_kernel_read(replaced, (void *)ip, old_size)) return -EFAULT; if (memcmp(replaced, old_code, old_size) != 0) return -EINVAL; if (probe_kernel_write((void *)ip, new_code, new_size)) return -EPERM; return 0; }
/*
 * Probe for Topcat framebuffers, both DIO-bus devices (handled by the
 * registered dio driver) and the internal-IO-bus variant at a fixed
 * address, which must be detected by hand here.
 *
 * Returns 0 on success or a negative errno from registration/probing.
 */
int __init hpfb_init(void)
{
	unsigned int sid;
	unsigned char i;
	int err;

	/* Topcats can be on the internal IO bus or real DIO devices.
	 * The internal variant sits at 0x560000; it has primary
	 * and secondary ID registers just like the DIO version.
	 * So we merge the two detection routines.
	 *
	 * Perhaps this #define should be in a global header file:
	 * I believe it's common to all internal fbs, not just topcat.
	 */
#define INTFBVADDR 0xf0560000
#define INTFBPADDR 0x560000

	if (!MACH_IS_HP300)
		return -ENODEV;

	if (fb_get_options("hpfb", NULL))
		return -ENODEV;

	/* Handles all Topcats on the real DIO bus. */
	err = dio_register_driver(&hpfb_driver);
	if (err)
		return err;

	/*
	 * Probe the internal-bus slot with a fault-tolerant read: if no
	 * device responds at INTFBVADDR the access faults and err != 0.
	 * sid is assigned inside the condition before topcat_sid_ok()
	 * examines it.
	 */
	err = probe_kernel_read(&i, (unsigned char *)INTFBVADDR + DIO_IDOFF, 1);

	if (!err && (i == DIO_ID_FBUFFER) &&
	    topcat_sid_ok(sid = DIO_SECID(INTFBVADDR))) {
		if (!request_mem_region(INTFBPADDR, DIO_DEVSIZE,
					"Internal Topcat"))
			return -EBUSY;
		printk(KERN_INFO "Internal Topcat found (secondary id %02x)\n",
		       sid);
		if (hpfb_init_one(INTFBPADDR, INTFBVADDR)) {
			return -ENOMEM;
		}
	}
	return 0;
}
/*
 * Convert the memory pointed to by mem into hex, placing result in buf.
 * Return a pointer to the last char put in buf (null). May return an error.
 *
 * @mem:   kernel address to read (may fault; probe_kernel_read handles it)
 * @buf:   output buffer; must hold at least 2*count + 1 bytes
 * @count: number of raw bytes to convert
 *
 * Returns 0 on success or the negative error from probe_kernel_read().
 */
int kgdb_mem2hex(char *mem, char *buf, int count)
{
	char *tmp;
	int err;

	/*
	 * We use the upper half of buf as an intermediate buffer for the
	 * raw memory copy. Hex conversion will work against this one.
	 *
	 * The in-place expansion is safe: at step i the hex writer is at
	 * buf[2*i] while the reader is at buf[count + i], and
	 * 2*i + 1 < count + i holds for all i < count, so the writer
	 * never overtakes unread raw bytes.
	 */
	tmp = buf + count;

	err = probe_kernel_read(tmp, mem, count);
	if (!err) {
		while (count > 0) {
			buf = pack_hex_byte(buf, *tmp);
			tmp++;
			count--;
		}
		*buf = 0;
	}

	return err;
}
/*
 * Compare a module's recorded build-id (m->build_id_bits) byte-by-byte
 * against the notes section actually loaded at @notes_addr.
 *
 * @m:          module whose build-id is being validated
 * @notes_addr: address of the build-id bytes in (kernel or user) memory
 * @tsk:        task whose address space holds the notes, or NULL for
 *              kernel memory
 *
 * Returns 0 on match (or soft-warn on old kernels), 1 on mismatch.
 */
static int _stp_build_id_check (struct _stp_module *m,
				unsigned long notes_addr,
				struct task_struct *tsk)
{
	int j;

	for (j = 0; j < m->build_id_len; j++) {
		/* Use set_fs / get_user to access conceivably invalid addresses.
		 * If loc2c-runtime.h were more easily usable, a deref() loop
		 * could do it too. */
		mm_segment_t oldfs = get_fs();
		int rc;
		unsigned char theory, practice = 0;

#ifdef STAPCONF_PROBE_KERNEL
		/* Kernel-space check: prefer the fault-tolerant accessor. */
		if (!tsk) {
			theory = m->build_id_bits[j];
			set_fs(KERNEL_DS);
			rc = probe_kernel_read(&practice, (void*)(notes_addr + j), 1);
		} else
#endif
		{
			theory = m->build_id_bits[j];
			set_fs (tsk ? USER_DS : KERNEL_DS);

			/*
			 * Why check CONFIG_UTRACE here? If we're using real in-kernel
			 * utrace, we can always just call get_user() (since we're
			 * either reading kernel memory or tsk == current).
			 *
			 * Since we're only reading here, we can call
			 * __access_process_vm_noflush(), which only calls things that
			 * are exported.
			 */
#ifdef CONFIG_UTRACE
			rc = get_user(practice, ((unsigned char*)(void*)(notes_addr + j)));
#else
			if (!tsk || tsk == current) {
				rc = get_user(practice, ((unsigned char*)(void*)(notes_addr + j)));
			} else {
				/* Foreign task: read via its address space. */
				rc = (__access_process_vm_noflush(tsk, (notes_addr + j), &practice, 1, 0) != 1);
			}
#endif
		}
		set_fs(oldfs);

		if (rc || (theory != practice)) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
			_stp_error ("Build-id mismatch [man error::buildid]: \"%s\" byte %d (0x%02x vs 0x%02x) address %#lx rc %d\n",
				    m->path, j, theory, practice, notes_addr, rc);
			return 1;
#else
			/* This branch is a surrogate for kernels affected by Fedora bug
			 * #465873. */
			_stp_warn (KERN_WARNING
				   "Build-id mismatch [man error::buildid]: \"%s\" byte %d (0x%02x vs 0x%02x) rc %d\n",
				   m->path, j, theory, practice, rc);
#endif
			break;
		} /* end mismatch */
	} /* end per-byte check loop */

	return 0;
}
/*
 * Attempt to emulate single stepping by means of breakpoint instructions.
 * Although there is a single-step trace flag in EPSW, its use is not
 * sufficiently documented and is only intended for use with the JTAG debugger.
 *
 * Decodes the instruction at regs->pc and plants a temporary breakpoint at
 * each possible successor address: x is always set; y is additionally set
 * for two-target instructions (conditional branches, LXX).  Returns 0 on
 * success or the negative error from a faulting probe.
 *
 * NOTE(review): `arg` is unsigned long, so comparisons like `arg >= 0`
 * are always true -- preserved from the original; confirm the small-
 * displacement special cases behave as intended.
 */
static int kgdb_arch_do_singlestep(struct pt_regs *regs)
{
	unsigned long arg;
	unsigned size;
	/* sp: the stack frame immediately follows pt_regs on this arch. */
	u8 *pc = (u8 *)regs->pc, *sp = (u8 *)(regs + 1), cur;
	u8 *x = NULL, *y = NULL;
	int ret;

	ret = probe_kernel_read(&cur, pc, 1);
	if (ret < 0)
		return ret;

	/* Fixed-size, non-branching instructions: step to the next one. */
	size = mn10300_kgdb_insn_sizes[cur];
	if (size > 0) {
		x = pc + size;
		goto set_x;
	}

	switch (cur) {
		/* Bxx (d8,PC) */
	case 0xc0 ... 0xca:
		ret = probe_kernel_read(&arg, pc + 1, 1);
		if (ret < 0)
			return ret;
		x = pc + 2;
		if (arg >= 0 && arg <= 2)
			goto set_x;
		y = pc + (s8)arg;
		goto set_x_and_y;

		/* LXX (d8,PC) */
	case 0xd0 ... 0xda:
		x = pc + 1;
		if (regs->pc == regs->lar)
			goto set_x;
		y = (u8 *)regs->lar;
		goto set_x_and_y;

		/* SETLB - loads the next four bytes into the LIR register
		 * (which mustn't include a breakpoint instruction) */
	case 0xdb:
		x = pc + 5;
		goto set_x;

		/* JMP (d16,PC) or CALL (d16,PC) */
	case 0xcc:
	case 0xcd:
		ret = probe_kernel_read(&arg, pc + 1, 2);
		if (ret < 0)
			return ret;
		x = pc + (s16)arg;
		goto set_x;

		/* JMP (d32,PC) or CALL (d32,PC) */
	case 0xdc:
	case 0xdd:
		ret = probe_kernel_read(&arg, pc + 1, 4);
		if (ret < 0)
			return ret;
		x = pc + (s32)arg;
		goto set_x;

		/* RETF */
	case 0xde:
		x = (u8 *)regs->mdr;
		goto set_x;

		/* RET */
	case 0xdf:
		ret = probe_kernel_read(&arg, pc + 2, 1);
		if (ret < 0)
			return ret;
		/* Return address sits on the stack at sp + (s8)arg. */
		ret = probe_kernel_read(&x, sp + (s8)arg, 4);
		if (ret < 0)
			return ret;
		goto set_x;

	case 0xf0:
		ret = probe_kernel_read(&cur, pc + 1, 1);
		if (ret < 0)
			return ret;

		if (cur >= 0xf0 && cur <= 0xf7) {
			/* JMP (An) / CALLS (An) */
			switch (cur & 3) {
			case 0: x = (u8 *)regs->a0; break;
			case 1: x = (u8 *)regs->a1; break;
			case 2: x = (u8 *)regs->a2; break;
			case 3: x = (u8 *)regs->a3; break;
			}
			goto set_x;
		} else if (cur == 0xfc) {
			/* RETS */
			ret = probe_kernel_read(&x, sp, 4);
			if (ret < 0)
				return ret;
			goto set_x;
		} else if (cur == 0xfd) {
			/* RTI */
			ret = probe_kernel_read(&x, sp + 4, 4);
			if (ret < 0)
				return ret;
			goto set_x;
		} else {
			/* Other 0xf0-prefixed insns are two bytes long. */
			x = pc + 2;
			goto set_x;
		}
		break;

		/* potential 3-byte conditional branches */
	case 0xf8:
		ret = probe_kernel_read(&cur, pc + 1, 1);
		if (ret < 0)
			return ret;
		x = pc + 3;

		if (cur >= 0xe8 && cur <= 0xeb) {
			ret = probe_kernel_read(&arg, pc + 2, 1);
			if (ret < 0)
				return ret;
			if (arg >= 0 && arg <= 3)
				goto set_x;
			y = pc + (s8)arg;
			goto set_x_and_y;
		}
		goto set_x;

	case 0xfa:
		ret = probe_kernel_read(&cur, pc + 1, 1);
		if (ret < 0)
			return ret;

		if (cur == 0xff) {
			/* CALLS (d16,PC) */
			ret = probe_kernel_read(&arg, pc + 2, 2);
			if (ret < 0)
				return ret;
			x = pc + (s16)arg;
			goto set_x;
		}

		x = pc + 4;
		goto set_x;

	case 0xfc:
		ret = probe_kernel_read(&cur, pc + 1, 1);
		if (ret < 0)
			return ret;
		if (cur == 0xff) {
			/* CALLS (d32,PC) */
			ret = probe_kernel_read(&arg, pc + 2, 4);
			if (ret < 0)
				return ret;
			x = pc + (s32)arg;
			goto set_x;
		}

		x = pc + 6;
		goto set_x;
	}

	return 0;

	/* Plant a single breakpoint at x, saving the displaced byte. */
set_x:
	kgdb_sstep_bp_addr[0] = x;
	kgdb_sstep_bp_addr[1] = NULL;
	ret = probe_kernel_read(&kgdb_sstep_bp[0], x, 1);
	if (ret < 0)
		return ret;
	ret = probe_kernel_write(x, &arch_kgdb_ops.gdb_bpt_instr, 1);
	if (ret < 0)
		return ret;
	kgdb_sstep_thread = current_thread_info();
	debugger_local_cache_flushinv_one(x);
	return ret;

	/* Plant breakpoints at both possible successors x and y. */
set_x_and_y:
	kgdb_sstep_bp_addr[0] = x;
	kgdb_sstep_bp_addr[1] = y;
	ret = probe_kernel_read(&kgdb_sstep_bp[0], x, 1);
	if (ret < 0)
		return ret;
	ret = probe_kernel_read(&kgdb_sstep_bp[1], y, 1);
	if (ret < 0)
		return ret;
	ret = probe_kernel_write(x, &arch_kgdb_ops.gdb_bpt_instr, 1);
	if (ret < 0)
		return ret;
	ret = probe_kernel_write(y, &arch_kgdb_ops.gdb_bpt_instr, 1);
	if (ret < 0) {
		/* Second plant failed: undo the first so text is intact. */
		probe_kernel_write(kgdb_sstep_bp_addr[0],
				   &kgdb_sstep_bp[0], 1);
	} else {
		kgdb_sstep_thread = current_thread_info();
	}
	debugger_local_cache_flushinv_one(x);
	debugger_local_cache_flushinv_one(y);
	return ret;
}