/*
 * Adjust the displacement if the instruction uses the %rip-relative
 * addressing mode.
 * Only applicable to 64-bit x86.
 */
static void __kprobes fix_riprel(struct kprobe *p)
{
#ifdef CONFIG_X86_64
	struct insn insn;
	kernel_insn_init(&insn, p->ainsn.insn);

	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode. Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run. The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) p->addr + (s64) insn.displacement.value -
			  (u8 *) p->ainsn.insn;
		BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
		disp = (u8 *) p->ainsn.insn + insn_offset_displacement(&insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
}
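The BUG_ON() above is the entire overflow check: a displacement survives the s64 -> s32 -> s64 round trip exactly when it fits in the instruction's signed 32-bit displacement field. A minimal user-space sketch of the same test, using only standard C (nothing here is kernel API):

#include <stdint.h>
#include <stdio.h>

/* Non-zero if 'disp' survives an s64 -> s32 -> s64 round trip, i.e.
 * it can be stored in a 32-bit displacement field and still
 * sign-extend back to the same 64-bit value at execution time. */
static int fits_in_s32(int64_t disp)
{
	return (int64_t)(int32_t)disp == disp;
}

int main(void)
{
	printf("%d\n", fits_in_s32(INT32_MIN));              /* 1 */
	/* One past the range: the low 32 bits sign-extend to a
	 * different value, so the fixup would be incorrect. */
	printf("%d\n", fits_in_s32((int64_t)INT32_MIN - 1)); /* 0 */
	return 0;
}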
/*
 * Copy an instruction and adjust the displacement if the instruction
 * uses the %rip-relative addressing mode.
 * This returns the length of the copied instruction, or 0 on failure.
 * The displacement fixup is only needed on 64-bit x86.
 */
int __copy_instruction(u8 *dest, u8 *src)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int length;
	unsigned long recovered_insn =
		recover_probed_instruction(buf, (unsigned long)src);

	if (!recovered_insn)
		return 0;
	kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
	insn_get_length(&insn);
	length = insn.length;
	/* Another subsystem put a breakpoint here and recovery failed */
	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;
	pax_open_kernel();
	memcpy(dest, insn.kaddr, length);
	pax_close_kernel();

#ifdef CONFIG_X86_64
	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		kernel_insn_init(&insn, dest, length);
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode. Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run. The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn.displacement.value -
			  (u8 *) dest;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n",
			       newdisp);
			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
			       src, dest, insn.displacement.value);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(&insn);
		pax_open_kernel();
		*(s32 *) disp = (s32) newdisp;
		pax_close_kernel();
	}
#endif
	return length;
}
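The displacement arithmetic works because a %rip-relative target is "address of the next instruction plus disp32", and the instruction length is identical at the source and at the copy, so it cancels out of the equation. A small user-space sketch with made-up values, showing that newdisp = src + olddisp - dest leaves the absolute target unchanged:

#include <stdint.h>
#include <assert.h>
#include <stdio.h>

int main(void)
{
	intptr_t src  = 0x400000;  /* probed address (made up) */
	intptr_t dest = 0x600000;  /* copy that will run (made up) */
	int len = 7;               /* e.g. mov 0x1234(%rip),%rax */
	int64_t olddisp = 0x1234;

	int64_t newdisp = src + olddisp - dest;

	/* Original and copy resolve to the same absolute target:
	 * next-%rip + displacement is equal for both. */
	assert(src + len + olddisp == dest + len + newdisp);
	printf("newdisp = %lld\n", (long long)newdisp);
	return 0;
}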
/*
 * Copy an instruction, recovering the original bytes first if kprobes has
 * modified it, and adjust the displacement if the instruction uses the
 * %rip-relative addressing mode. Note that since @real will be the final
 * place of the copied instruction, the displacement must be adjusted
 * against @real, not @dest.
 * This returns the length of the copied instruction, or 0 on failure.
 */
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn =
		recover_probed_instruction(buf, (unsigned long)src);

	if (!recovered_insn || !insn)
		return 0;

	/* This can access kernel text if the given address was not recovered */
	if (probe_kernel_read(dest, (void *)recovered_insn, MAX_INSN_SIZE))
		return 0;

	kernel_insn_init(insn, dest, MAX_INSN_SIZE);
	insn_get_length(insn);

	/* Another subsystem put a breakpoint here and recovery failed */
	if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;

	/* We should not singlestep on the exception masking instructions */
	if (insn_masking_exception(insn))
		return 0;

#ifdef CONFIG_X86_64
	/* Only x86_64 has RIP-relative instructions */
	if (insn_rip_relative(insn)) {
		s64 newdisp;
		u8 *disp;
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode. Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run. The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn->displacement.value -
			  (u8 *) real;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n",
			       newdisp);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn->length;
}
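The point of @real vs. @dest: the bytes are staged in @dest (for instance a temporary buffer that is later installed into a trampoline), but they will execute at @real, so the displacement must be computed against @real. A sketch with hypothetical addresses:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	intptr_t src  = 0x400000;  /* probed address (made up) */
	intptr_t dest = 0x500000;  /* staging buffer (made up) */
	intptr_t real = 0x600000;  /* final executable slot (made up) */
	int len = 7;
	int64_t olddisp = 0x1234;

	/* As above: adjust against 'real', not 'dest'. */
	int64_t newdisp = src + olddisp - real;

	/* Correct once the staged bytes are installed at 'real'... */
	assert(real + len + newdisp == src + len + olddisp);
	/* ...and wrong if 'dest' had been used in the subtraction. */
	assert(dest + len + newdisp != src + len + olddisp);
	return 0;
}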
void copy_and_fixup_insn(struct insn *src_insn, void *dest,
			 const struct kernsym *func)
{
	u32 *to_fixup;
	unsigned long addr;

	BUG_ON(src_insn->length == 0);

	memcpy((void *)dest, (const void *)src_insn->kaddr, src_insn->length);

	if (src_insn->opcode.bytes[0] == OP_CALL_REL32 ||
	    src_insn->opcode.bytes[0] == OP_JMP_REL32) {
		/* Relative near call or jump: recompute the rel32 offset
		 * unless the target lies within the function itself. */
		addr = (unsigned long)CODE_ADDR_FROM_OFFSET(
			src_insn->kaddr, src_insn->length,
			src_insn->immediate.value);

		if (addr >= (unsigned long)func->addr &&
		    addr < (unsigned long)func->addr + func->size)
			return;

		to_fixup = (u32 *)((unsigned long)dest +
			insn_offset_immediate(src_insn));
		*to_fixup = CODE_OFFSET_FROM_ADDR(dest, src_insn->length,
			(void *)addr);
		return;
	}

#ifdef CONFIG_X86_64
	if (!tpe_insn_rip_relative(src_insn))
		return;

	/* Same fixup for %rip-relative displacements. */
	addr = (unsigned long)CODE_ADDR_FROM_OFFSET(
		src_insn->kaddr, src_insn->length,
		src_insn->displacement.value);

	if (addr >= (unsigned long)func->addr &&
	    addr < (unsigned long)func->addr + func->size)
		return;

	to_fixup = (u32 *)((unsigned long)dest +
		insn_offset_displacement(src_insn));
	*to_fixup = CODE_OFFSET_FROM_ADDR(dest, src_insn->length,
		(void *)addr);
#endif
	return;
}
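CODE_ADDR_FROM_OFFSET() and CODE_OFFSET_FROM_ADDR() are not defined in this excerpt; the sketch below assumes they implement the usual rel32 convention (target = end of instruction + signed offset), which matches how they are used above. The definitions here are guesses for illustration only:

#include <stdint.h>
#include <assert.h>

/* Assumed semantics, not the project's actual definitions. */
#define CODE_ADDR_FROM_OFFSET(insn_addr, insn_len, offset) \
	((void *)((unsigned long)(insn_addr) + (insn_len) + (long)(offset)))
#define CODE_OFFSET_FROM_ADDR(insn_addr, insn_len, target) \
	((uint32_t)((unsigned long)(target) - \
		((unsigned long)(insn_addr) + (insn_len))))

int main(void)
{
	unsigned long insn = 0x1000;  /* made-up instruction address */
	int len = 5;                  /* e8 xx xx xx xx: call rel32 */
	int32_t rel = 0x42;

	void *target = CODE_ADDR_FROM_OFFSET((void *)insn, len, rel);
	/* Re-encoding at a new location preserves the target. */
	unsigned long copy = 0x2000;
	uint32_t newrel = CODE_OFFSET_FROM_ADDR((void *)copy, len, target);
	assert((unsigned long)target == copy + len + (int32_t)newrel);
	return 0;
}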
/*
 * Copy an instruction and adjust the displacement if the instruction
 * uses the %rip-relative addressing mode.
 * This returns the length of the copied instruction, or 0 on failure.
 * The displacement fixup is only needed on 64-bit x86.
 */
static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
{
	struct insn insn;
	int ret;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	kernel_insn_init(&insn, src);
	if (recover) {
		insn_get_opcode(&insn);
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
			ret = recover_probed_instruction(buf,
							 (unsigned long)src);
			if (ret)
				return 0;
			kernel_insn_init(&insn, buf);
		}
	}
	insn_get_length(&insn);
	memcpy(dest, insn.kaddr, insn.length);

#ifdef CONFIG_X86_64
	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		kernel_insn_init(&insn, dest);
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode. Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run. The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn.displacement.value -
			  (u8 *) dest;
		BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
		disp = (u8 *) dest + insn_offset_displacement(&insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn.length;
}
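BREAKPOINT_INSTRUCTION on x86 is the single-byte int3 opcode, 0xcc. The recovery path exists because another probe may already have replaced the first byte of the instruction at 'src' with int3; copying the bytes verbatim would then single-step the breakpoint instead of the original instruction. A minimal sketch of just the detection step (hypothetical buffers, not kernel API):

#include <stdint.h>
#include <stdio.h>

#define BREAKPOINT_INSTRUCTION 0xcc  /* x86 int3 */

/* Non-zero if another probe has planted int3 here and the original
 * bytes must be recovered before they can be copied. */
static int needs_recovery(const uint8_t *insn)
{
	return insn[0] == BREAKPOINT_INSTRUCTION;
}

int main(void)
{
	uint8_t patched[] = { 0xcc, 0xe5 };  /* int3 over mov %esp,%ebp */
	uint8_t clean[]   = { 0x55 };        /* push %rbp */

	printf("%d %d\n", needs_recovery(patched), needs_recovery(clean));
	return 0;
}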
static void print_ir_node(struct kedr_ifunc *func, struct kedr_ir_node *node,
			  struct kedr_ir_node *start)
{
	u8 buf[X86_MAX_INSN_SIZE];
	struct insn *insn = &node->insn;
	u8 *pos;
	u8 opcode;
	u8 modrm;
	int is_mov_imm_to_reg;

	if (node->dest_inner != NULL)
		debug_util_print_ulong(
			offset_for_node(func, node->dest_inner),
			"Jump to 0x%lx\n");

	memcpy(&buf[0], &node->insn_buffer[0], X86_MAX_INSN_SIZE);

	opcode = insn->opcode.bytes[0];
	modrm = insn->modrm.bytes[0];

	/* Non-zero for MOV imm32/64, %reg. */
	is_mov_imm_to_reg =
		((opcode == 0xc7 && X86_MODRM_REG(modrm) == 0) ||
		 (opcode >= 0xb8 && opcode <= 0xbf));

	/* For the indirect near jumps using a jump table, as well as
	 * for other instructions using similar addressing expressions,
	 * we cannot determine the address of the table in advance to
	 * prepare the expected dump properly. Let us just put 0 here. */
	if (X86_MODRM_RM(modrm) == 4 && insn->displacement.nbytes == 4) {
		/* SIB and disp32 are used.
		 * [NB] If mod == 3, displacement.nbytes is 0. */
		pos = buf + insn_offset_displacement(&node->insn);
		*(u32 *)pos = 0;
	}
	else if (opcode == 0xe8 || opcode == 0xe9 ||
		 (opcode == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80)) {
		/* same for the relative near calls and jumps */
		pos = buf + insn_offset_immediate(insn);
		*(u32 *)pos = 0;
	}
	else if ((insn->modrm.bytes[0] & 0xc7) == 0x5) {
		/* same for the insns with IP-relative addressing (x86-64)
		 * and with plain disp32 addressing (x86-32). */
		pos = buf + insn_offset_displacement(insn);
		*(u32 *)pos = 0;
	}
#ifdef CONFIG_X86_64
	else if (start != NULL && is_mov_imm_to_reg &&
		 X86_REX_W(insn->rex_prefix.value)) {
		/* MOV imm64, %reg; check if imm64 is the address of
		 * a call_info or a block_info instance. */
		u64 imm64 = ((u64)insn->immediate2.value << 32) |
			(u64)(u32)insn->immediate1.value;
		/* [NB] insn->immediate*.value is signed by default, so we
		 * cast it to u32 here first to avoid sign extension which
		 * would lead to an incorrectly calculated value of 'imm64'. */
		if (imm64 == (u64)(unsigned long)start->block_info) {
			debug_util_print_ulong(offset_for_node(func, start),
			"Ref. to block_info for the block at 0x%lx\n");
		}
		if (imm64 == (u64)(unsigned long)start->call_info) {
			/* 'start' should be the only reference node of the
			 * block in this case. */
			debug_util_print_ulong(offset_for_node(func, start),
			"Ref. to call_info for the node at 0x%lx\n");
		}
		/* Zero the immediate value anyway. */
		pos = buf + insn_offset_immediate(insn);
		*(u64 *)pos = 0;
	}
#else /* x86-32 */
	else if (start != NULL && is_mov_imm_to_reg) {
		/* "MOV imm32, r/m32"; check if imm32 is the address of
		 * a call_info or a block_info instance. */
		u32 imm32 = (u32)insn->immediate.value;
		if (imm32 == (u32)(unsigned long)start->block_info) {
			pos = buf + insn_offset_immediate(insn);
			*(u32 *)pos = 0;
			debug_util_print_ulong(offset_for_node(func, start),
			"Ref. to block_info for the block at 0x%lx\n");
		}
		if (imm32 == (u32)(unsigned long)start->call_info) {
			pos = buf + insn_offset_immediate(insn);
			*(u32 *)pos = 0;
			/* 'start' should be the only reference node of the
			 * block in this case. */
			debug_util_print_ulong(offset_for_node(func, start),
			"Ref. to call_info for the node at 0x%lx\n");
		}
		/* Zero the immediate value anyway. */
		pos = buf + insn_offset_immediate(insn);
		*(u32 *)pos = 0;
	}
#endif
	else if (start == NULL && is_mov_imm_to_reg) {
		/* MOV imm32/imm64, %rax in the entry handler. */
		pos = buf + insn_offset_immediate(insn);
		*(unsigned long *)pos = 0;
	}
	else if (opcode >= 0xa0 && opcode <= 0xa3) {
		/* direct offset MOV, zero the address */
		pos = buf + insn_offset_immediate(insn);
		*(unsigned long *)pos = 0;
	}

	debug_util_print_ulong(offset_for_node(func, node), "0x%lx: ");
	debug_util_print_hex_bytes(&buf[0], insn->length);
	debug_util_print_string("\n\n");
}
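print_ir_node() zeroes every placement-dependent field (disp32, rel32, imm32/imm64 holding an address) before dumping, so that two dumps of the same generated code can be compared byte for byte regardless of where the code was placed. A hypothetical standalone sketch of that masking idea:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Overwrite a field whose value depends on the final load address
 * (a disp32/imm32 at a known offset inside the instruction) with
 * zeroes, so dumps differ only where the code itself differs. */
static void mask_field(uint8_t *insn_buf, int field_offset, int field_len)
{
	memset(insn_buf + field_offset, 0, field_len);
}

int main(void)
{
	/* call rel32: e8 d6 ff ff ff (rel32 depends on placement) */
	uint8_t insn[] = { 0xe8, 0xd6, 0xff, 0xff, 0xff };
	int i;

	mask_field(insn, 1, 4);  /* the rel32 starts at offset 1 */
	for (i = 0; i < 5; i++)
		printf("%02x ", insn[i]);
	printf("\n");            /* prints: e8 00 00 00 00 */
	return 0;
}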