/* * Remove emplaced single-step breakpoints, returning true if we hit one of * them. */ static bool kgdb_arch_undo_singlestep(struct pt_regs *regs) { bool hit = false; u8 *x = kgdb_sstep_bp_addr[0], *y = kgdb_sstep_bp_addr[1]; u8 opcode; if (kgdb_sstep_thread == current_thread_info()) { if (x) { if (x == (u8 *)regs->pc) hit = true; if (probe_kernel_read(&opcode, x, 1) < 0 || opcode != 0xff) BUG(); probe_kernel_write(x, &kgdb_sstep_bp[0], 1); debugger_local_cache_flushinv_one(x); } if (y) { if (y == (u8 *)regs->pc) hit = true; if (probe_kernel_read(&opcode, y, 1) < 0 || opcode != 0xff) BUG(); probe_kernel_write(y, &kgdb_sstep_bp[1], 1); debugger_local_cache_flushinv_one(y); } } kgdb_sstep_bp_addr[0] = NULL; kgdb_sstep_bp_addr[1] = NULL; kgdb_sstep_thread = NULL; return hit; }
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { /* Initial replacement of the whole mcount block */ if (addr == MCOUNT_ADDR) { if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET, mcount_replace_code, MCOUNT_BLOCK_SIZE)) return -EPERM; return 0; } if (probe_kernel_write((void *) rec->ip, ftrace_disable_code, MCOUNT_INSN_SIZE)) return -EPERM; return 0; }
/*
 * Re-arm the call site described by @rec by writing the enable
 * instruction over it.  Returns 0 on success, -EPERM on write failure.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int rc;

	rc = probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
				FTRACE_INSN_SIZE);
	return rc ? -EPERM : 0;
}
/*
 * Disable the graph caller by patching a displacement of 0x0002 into
 * ftrace_graph_caller, i.e. a branch to the immediately following
 * instruction.  Returns the probe_kernel_write() result.
 */
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned short nop_disp = 0x0002;

	return probe_kernel_write(ftrace_graph_caller + 2,
				  &nop_disp, sizeof(nop_disp));
}
/*
 * Patch the disable code over the call site described by @rec.
 * Returns 0 on success, -EPERM if the text could not be written.
 */
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	int rc;

	rc = probe_kernel_write((void *) rec->ip, ftrace_disable_code,
				MCOUNT_INSN_SIZE);
	return rc ? -EPERM : 0;
}
/*
 * Enable the graph caller by patching the halfword displacement from
 * ftrace_graph_caller to prepare_ftrace_return into the instruction.
 * Returns the probe_kernel_write() result.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned short disp;

	/* displacement is counted in halfwords */
	disp = ((void *) prepare_ftrace_return -
		(void *) ftrace_graph_caller) / 2;
	return probe_kernel_write(ftrace_graph_caller + 2, &disp,
				  sizeof(disp));
}
/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 *
 * Save the instruction at @addr into @saved_instr, then write the
 * architecture's breakpoint instruction over it.  Returns 0 on success
 * or the error from the failing probe accessor.
 */
int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
{
	char *instr = (char *)addr;
	int ret;

	/* Preserve the original bytes so the breakpoint can be removed. */
	ret = probe_kernel_read(saved_instr, instr, BREAK_INSTR_SIZE);
	if (ret)
		return ret;

	ret = probe_kernel_write(instr, arch_kgdb_ops.gdb_bpt_instr,
				 BREAK_INSTR_SIZE);
	return ret;
}
/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 *
 * Save the instruction at bpt->bpt_addr into bpt->saved_instr, then
 * write the architecture's breakpoint instruction over it.  Returns 0
 * on success or the error from the failing probe accessor.
 */
int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	char *instr = (char *)bpt->bpt_addr;
	int ret;

	/* Preserve the original bytes so the breakpoint can be removed. */
	ret = probe_kernel_read(bpt->saved_instr, instr, BREAK_INSTR_SIZE);
	if (ret)
		return ret;

	return probe_kernel_write(instr, arch_kgdb_ops.gdb_bpt_instr,
				  BREAK_INSTR_SIZE);
}
/*
 * Copy the binary array pointed to by buf into mem.  Fix $, #, and
 * 0x7d escaped with 0x7d.  Return -EFAULT on failure or 0 on success.
 * The input buf is overwritten with the de-escaped result before it is
 * written to mem.
 */
static int kgdb_ebin2mem(char *buf, char *mem, int count)
{
	char *out = buf;
	int len = 0;

	/*
	 * De-escape in place: the write index never overtakes the read
	 * pointer, since each output byte consumes one or two input bytes.
	 */
	for (; count > 0; count--) {
		out[len] = *buf++;
		if (out[len] == 0x7d)
			out[len] = *buf++ ^ 0x20;
		len++;
	}

	return probe_kernel_write(mem, out, len);
}
/*
 * Remove the temporary single-step breakpoint, restoring the displaced
 * instruction at stepped_addr.  No-op if no step breakpoint is planted.
 */
static void undo_single_step(struct pt_regs *regs)
{
	unsigned long wr_addr;

	if (!stepped_instr)
		return;

	wr_addr = writable_address(stepped_addr);
	probe_kernel_write((char *)wr_addr, (char *)&stepped_instr,
			   BREAK_INSTR_SIZE);
	stepped_instr = 0;

	/* Make the restored bytes visible before refetching the text. */
	smp_wmb();
	flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
}
/*
 * Restore the saved instruction over a planted breakpoint.  Returns the
 * probe_kernel_write() result, or -1 if no writable alias of the
 * breakpoint address could be obtained.
 */
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	unsigned long wr_addr = writable_address(bpt->bpt_addr);
	int ret;

	if (!wr_addr)
		return -1;

	ret = probe_kernel_write((char *)wr_addr, (char *)bpt->saved_instr,
				 BREAK_INSTR_SIZE);

	/* Make the restored bytes visible before refetching the text. */
	smp_wmb();
	flush_icache_range((unsigned long)bpt->bpt_addr,
			   (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
	return ret;
}
/* * Replace the next instruction after the current instruction with a * breakpoint instruction. */ static void do_single_step(struct pt_regs *regs) { unsigned long addr_wr; /* Determine where the target instruction will send us to. */ stepped_addr = get_step_address(regs); probe_kernel_read((char *)&stepped_instr, (char *)stepped_addr, BREAK_INSTR_SIZE); addr_wr = writable_address(stepped_addr); probe_kernel_write((char *)addr_wr, (char *)&singlestep_insn, BREAK_INSTR_SIZE); smp_wmb(); flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE); }
/*
 * Copy the binary array pointed to by buf into mem, one byte at a time.
 * Fix $, #, and 0x7d escaped with 0x7d.  Returns 0 on success or the
 * first probe_kernel_write() error encountered.
 */
static int kgdb_ebin2mem(char *buf, char *mem, int count)
{
	int ret = 0;

	for (; count > 0 && !ret; count--, mem++) {
		char byte = *buf++;

		/* 0x7d escapes the following byte (XORed with 0x20) */
		if (byte == 0x7d)
			byte = *buf++ ^ 0x20;

		ret = probe_kernel_write(mem, &byte, 1);
	}

	return ret;
}
/*
 * Convert the hex array pointed to by buf into binary and place it in
 * mem.  Returns 0 on success or the probe_kernel_write() error.
 *
 * The conversion is done in place: the raw bytes are assembled into the
 * upper half of buf, working backwards so no hex digit is overwritten
 * before it has been read.
 */
int kgdb_hex2mem(char *buf, char *mem, int count)
{
	char *raw_end, *hex_src;

	raw_end = buf + count * 2;
	hex_src = raw_end - 1;
	while (hex_src >= buf) {
		raw_end--;
		*raw_end = hex(*hex_src--);
		*raw_end |= hex(*hex_src--) << 4;
	}

	/* raw_end now points at buf + count, the start of the raw bytes */
	return probe_kernel_write(mem, raw_end, count);
}
static int ftrace_modify_code(unsigned long ip, void *old_code, int old_size, void *new_code, int new_size) { unsigned char replaced[MCOUNT_INSN_SIZE]; /* * Note: Due to modules code can disappear and change. * We need to protect against faulting as well as code * changing. We do this by using the probe_kernel_* * functions. * This however is just a simple sanity check. */ if (probe_kernel_read(replaced, (void *)ip, old_size)) return -EFAULT; if (memcmp(replaced, old_code, old_size) != 0) return -EINVAL; if (probe_kernel_write((void *)ip, new_code, new_size)) return -EPERM; return 0; }
BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 */
	bool bad_ctx = in_interrupt() ||
		       (current->flags & (PF_KTHREAD | PF_EXITING));

	if (unlikely(bad_ctx))
		return -EPERM;
	/*
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 */
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
		return -EPERM;

	return probe_kernel_write(unsafe_ptr, src, size);
}
/*
 * Restore the saved instruction bytes over a planted breakpoint.
 * Returns the probe_kernel_write() result.
 */
int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	int ret;

	ret = probe_kernel_write((char *)bpt->bpt_addr,
				 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
	return ret;
}
/*
 * Disable the graph caller by patching the opcode 0xa7f4 over the first
 * instruction of ftrace_graph_caller.  Returns the probe_kernel_write()
 * result.
 */
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned short insn = 0xa7f4;

	return probe_kernel_write(ftrace_graph_caller, &insn, sizeof(insn));
}
/*
 * Attempt to emulate single stepping by means of breakpoint instructions.
 * Although there is a single-step trace flag in EPSW, its use is not
 * sufficiently documented and is only intended for use with the JTAG debugger.
 *
 * The instruction at regs->pc is decoded to find every address control
 * could reach next (one or two candidates, x and y); a breakpoint byte is
 * planted at each, the displaced bytes being saved in kgdb_sstep_bp[] and
 * their addresses in kgdb_sstep_bp_addr[] so they can be removed later.
 *
 * Returns 0 on success (including an unrecognised opcode, where no
 * breakpoints are planted) or a negative error from the probe accessors.
 */
static int kgdb_arch_do_singlestep(struct pt_regs *regs)
{
	unsigned long arg;
	unsigned size;
	/* sp: the stack as at the exception - pt_regs sits just below it */
	u8 *pc = (u8 *)regs->pc, *sp = (u8 *)(regs + 1), cur;
	u8 *x = NULL, *y = NULL;	/* candidate next-PC addresses */
	int ret;

	ret = probe_kernel_read(&cur, pc, 1);
	if (ret < 0)
		return ret;

	/* non-branching instructions of known size: next PC is pc + size */
	size = mn10300_kgdb_insn_sizes[cur];
	if (size > 0) {
		x = pc + size;
		goto set_x;
	}

	switch (cur) {
		/* Bxx (d8,PC) - conditional branch: may fall through (x)
		 * or take the displacement (y).
		 * NOTE(review): only 1 byte is read into 'arg', so its
		 * upper bytes are uninitialized for the range test below,
		 * and 'arg >= 0' is always true for an unsigned type -
		 * confirm against the upstream source. */
	case 0xc0 ... 0xca:
		ret = probe_kernel_read(&arg, pc + 1, 1);
		if (ret < 0)
			return ret;
		x = pc + 2;
		if (arg >= 0 && arg <= 2)
			goto set_x;
		y = pc + (s8)arg;
		goto set_x_and_y;

		/* LXX (d8,PC) - loop: falls through or resumes at LAR */
	case 0xd0 ... 0xda:
		x = pc + 1;
		if (regs->pc == regs->lar)
			goto set_x;
		y = (u8 *)regs->lar;
		goto set_x_and_y;

		/* SETLB - loads the next four bytes into the LIR register
		 * (which mustn't include a breakpoint instruction) */
	case 0xdb:
		x = pc + 5;
		goto set_x;

		/* JMP (d16,PC) or CALL (d16,PC) */
	case 0xcc:
	case 0xcd:
		ret = probe_kernel_read(&arg, pc + 1, 2);
		if (ret < 0)
			return ret;
		x = pc + (s16)arg;
		goto set_x;

		/* JMP (d32,PC) or CALL (d32,PC) */
	case 0xdc:
	case 0xdd:
		ret = probe_kernel_read(&arg, pc + 1, 4);
		if (ret < 0)
			return ret;
		x = pc + (s32)arg;
		goto set_x;

		/* RETF - return address is held in MDR */
	case 0xde:
		x = (u8 *)regs->mdr;
		goto set_x;

		/* RET - return address is on the stack at an 8-bit offset */
	case 0xdf:
		ret = probe_kernel_read(&arg, pc + 2, 1);
		if (ret < 0)
			return ret;
		ret = probe_kernel_read(&x, sp + (s8)arg, 4);
		if (ret < 0)
			return ret;
		goto set_x;

		/* 0xf0-prefixed instructions: decode the second byte */
	case 0xf0:
		ret = probe_kernel_read(&cur, pc + 1, 1);
		if (ret < 0)
			return ret;
		if (cur >= 0xf0 && cur <= 0xf7) {
			/* JMP (An) / CALLS (An) - target held in An */
			switch (cur & 3) {
			case 0: x = (u8 *)regs->a0; break;
			case 1: x = (u8 *)regs->a1; break;
			case 2: x = (u8 *)regs->a2; break;
			case 3: x = (u8 *)regs->a3; break;
			}
			goto set_x;
		} else if (cur == 0xfc) {
			/* RETS - return address at the top of the stack */
			ret = probe_kernel_read(&x, sp, 4);
			if (ret < 0)
				return ret;
			goto set_x;
		} else if (cur == 0xfd) {
			/* RTI - return address at sp + 4 */
			ret = probe_kernel_read(&x, sp + 4, 4);
			if (ret < 0)
				return ret;
			goto set_x;
		} else {
			/* other 0xf0-prefixed insns are two bytes long */
			x = pc + 2;
			goto set_x;
		}
		break;

		/* potential 3-byte conditional branches
		 * NOTE(review): same uninitialized-upper-bytes concern for
		 * 'arg' as in the 0xc0..0xca case above - confirm. */
	case 0xf8:
		ret = probe_kernel_read(&cur, pc + 1, 1);
		if (ret < 0)
			return ret;
		x = pc + 3;
		if (cur >= 0xe8 && cur <= 0xeb) {
			ret = probe_kernel_read(&arg, pc + 2, 1);
			if (ret < 0)
				return ret;
			if (arg >= 0 && arg <= 3)
				goto set_x;
			y = pc + (s8)arg;
			goto set_x_and_y;
		}
		goto set_x;

	case 0xfa:
		ret = probe_kernel_read(&cur, pc + 1, 1);
		if (ret < 0)
			return ret;
		if (cur == 0xff) {
			/* CALLS (d16,PC) */
			ret = probe_kernel_read(&arg, pc + 2, 2);
			if (ret < 0)
				return ret;
			x = pc + (s16)arg;
			goto set_x;
		}
		/* other 0xfa-prefixed insns are four bytes long */
		x = pc + 4;
		goto set_x;

	case 0xfc:
		ret = probe_kernel_read(&cur, pc + 1, 1);
		if (ret < 0)
			return ret;
		if (cur == 0xff) {
			/* CALLS (d32,PC) */
			ret = probe_kernel_read(&arg, pc + 2, 4);
			if (ret < 0)
				return ret;
			x = pc + (s32)arg;
			goto set_x;
		}
		/* other 0xfc-prefixed insns are six bytes long */
		x = pc + 6;
		goto set_x;
	}

	/* unrecognised opcode: plant no breakpoints */
	return 0;

	/* plant a single breakpoint at x */
set_x:
	kgdb_sstep_bp_addr[0] = x;
	kgdb_sstep_bp_addr[1] = NULL;
	ret = probe_kernel_read(&kgdb_sstep_bp[0], x, 1);
	if (ret < 0)
		return ret;
	ret = probe_kernel_write(x, &arch_kgdb_ops.gdb_bpt_instr, 1);
	if (ret < 0)
		return ret;
	kgdb_sstep_thread = current_thread_info();
	debugger_local_cache_flushinv_one(x);
	return ret;

	/* plant breakpoints at both x and y; if the second write fails,
	 * back out the first so no stray breakpoint is left behind */
set_x_and_y:
	kgdb_sstep_bp_addr[0] = x;
	kgdb_sstep_bp_addr[1] = y;
	ret = probe_kernel_read(&kgdb_sstep_bp[0], x, 1);
	if (ret < 0)
		return ret;
	ret = probe_kernel_read(&kgdb_sstep_bp[1], y, 1);
	if (ret < 0)
		return ret;
	ret = probe_kernel_write(x, &arch_kgdb_ops.gdb_bpt_instr, 1);
	if (ret < 0)
		return ret;
	ret = probe_kernel_write(y, &arch_kgdb_ops.gdb_bpt_instr, 1);
	if (ret < 0) {
		probe_kernel_write(kgdb_sstep_bp_addr[0],
				   &kgdb_sstep_bp[0], 1);
	} else {
		kgdb_sstep_thread = current_thread_info();
	}
	debugger_local_cache_flushinv_one(x);
	debugger_local_cache_flushinv_one(y);
	return ret;
}
/*
 * Write one instruction at @addr, converting @insn to little-endian
 * first.  Returns the probe_kernel_write() result.
 */
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	u32 le_insn = cpu_to_le32(insn);

	return probe_kernel_write(addr, &le_insn, AARCH64_INSN_SIZE);
}
/*
 * Disable the graph caller by setting the instruction's mask field to
 * all ones.  Returns the probe_kernel_write() result.
 */
int ftrace_disable_ftrace_graph_caller(void)
{
	u8 mask = 0xf4;	/* set mask field to all ones */

	return probe_kernel_write(__va(ftrace_graph_caller) + 1,
				  &mask, sizeof(mask));
}
/*
 * Restore the saved instruction bundle over a planted breakpoint.
 * Returns the probe_kernel_write() result.
 */
int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
{
	char *instr = (char *)addr;

	return probe_kernel_write(instr, (char *)bundle, BREAK_INSTR_SIZE);
}
/*
 * Patch the kernel code at ftrace_graph_caller location. The instruction
 * there is branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	u8 mask = 0x04;	/* set mask field to zero */

	return probe_kernel_write(__va(ftrace_graph_caller) + 1,
				  &mask, sizeof(mask));
}