/*
 * Patch a single jump-label site in place.
 *
 * For JUMP_LABEL_JMP, encode an unconditional branch (no link) from the
 * site to its target; otherwise encode a NOP.  The write uses the
 * _nosync variant, so no cross-CPU synchronization is performed here.
 */
void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	u32 insn = (type == JUMP_LABEL_JMP)
		? aarch64_insn_gen_branch_imm(entry->code, entry->target,
					      AARCH64_INSN_BRANCH_NOLINK)
		: aarch64_insn_gen_nop();

	aarch64_insn_patch_text_nosync((void *)entry->code, insn);
}
/*
 * Patch @cnt instructions at @addrs under stop_machine(), so no CPU can be
 * executing the affected code while it is rewritten.
 *
 * Returns 0 on success, -EINVAL for a non-positive count, or the error
 * propagated from the patching callback.
 */
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine(aarch64_insn_patch_text_cb, &patch,
			    cpu_online_mask);
}

/*
 * Patch @cnt instructions, choosing the cheapest safe mechanism:
 * a single hotpatch-safe replacement is done live (plus an IPI broadcast
 * for instruction synchronization); anything else falls back to the
 * stop_machine()-based path above.
 */
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0],
							     insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	/* Not hotpatch-safe (or multiple insns): stop the machine. */
	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
static int __apply_alternatives(void *dummy) { struct alt_instr *alt; u32 *origptr, *replptr, *endptr; for (alt = __alt_instructions; alt < __alt_instructions_end; alt++) { if (!cpus_have_cap(alt->cpufeature)) continue; BUG_ON(alt->alt_len != alt->orig_len); pr_info_once("patching kernel code\n"); origptr = (void *)&alt->orig_offset + alt->orig_offset; endptr = (void *)origptr + alt->orig_len; replptr = (void *)&alt->alt_offset + alt->alt_offset; for (; origptr < endptr; origptr++, replptr++) BUG_ON(aarch64_insn_patch_text_nosync(origptr, *replptr)); } return 0; }
/*
 * stop_machine() callback that performs the actual text patching.
 *
 * The first CPU to increment cpu_count elects itself master and writes all
 * instructions; every other CPU spins until the master stores the -1
 * completion sentinel into cpu_count, then issues an ISB so its pipeline
 * refetches the freshly patched instructions.
 *
 * NOTE(review): non-master CPUs always return 0, so only the master's
 * error code is meaningful — confirm stop_machine()'s return-value
 * aggregation matches this expectation.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		/* Stop writing on the first failure. */
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* -1 doubles as the "patching done" sentinel for waiters. */
		atomic_set(&pp->cpu_count, -1);
	} else {
		while (atomic_read(&pp->cpu_count) != -1)
			cpu_relax();
		/* Discard any stale prefetched instructions. */
		isb();
	}

	return ret;
}