static void __apply_alternatives(void *alt_region) { struct alt_instr *alt; struct alt_region *region = alt_region; u32 *origptr, *replptr; for (alt = region->begin; alt < region->end; alt++) { u32 insn; int i, nr_inst; if (!cpus_have_cap(alt->cpufeature)) continue; BUG_ON(alt->alt_len > alt->orig_len); pr_info_once("patching kernel code\n"); origptr = ALT_ORIG_PTR(alt); replptr = ALT_REPL_PTR(alt); nr_inst = alt->alt_len / sizeof(insn); for (i = 0; i < nr_inst; i++) { insn = get_alt_insn(alt, origptr + i, replptr + i); *(origptr + i) = cpu_to_le32(insn); } flush_icache_range((uintptr_t)origptr, (uintptr_t)(origptr + nr_inst)); } }
void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps, const char *info) { int i; for (i = 0; caps[i].desc; i++) { if (!caps[i].matches(&caps[i])) continue; if (!cpus_have_cap(caps[i].capability)) pr_info("%s %s\n", info, caps[i].desc); cpus_set_cap(caps[i].capability); } /* second pass allows enable() to consider interacting capabilities */ for (i = 0; caps[i].desc; i++) { if (cpus_have_cap(caps[i].capability) && caps[i].enable) caps[i].enable(); } }
/*
 * The CPU Errata work arounds are detected and applied at boot time
 * and the related information is freed soon after. If the new CPU requires
 * an errata not detected at boot, fail this CPU.
 */
void verify_local_cpu_errata_workarounds(void)
{
	const struct arm64_cpu_capabilities *caps = arm64_errata;

	for (; caps->matches; caps++)
		/*
		 * Die if this (late) CPU is affected by an erratum that the
		 * boot-time pass did not record a workaround for.
		 */
		if (!cpus_have_cap(caps->capability) &&
		    caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: Requires work around for %s, not detected"
				" at boot time\n",
				smp_processor_id(),
				caps->desc ? : "an erratum");
			cpu_die_early();
		}
}
void check_local_cpu_errata(void) { struct arm64_cpu_capabilities *cpus = arm64_errata; int i; for (i = 0; cpus[i].desc; i++) { if (!cpus[i].is_affected(&cpus[i])) continue; if (!cpus_have_cap(cpus[i].capability)) pr_info("enabling workaround for %s\n", cpus[i].desc); cpus_set_cap(cpus[i].capability); } }
void update_cpu_capabilities(const struct arm_cpu_capabilities *caps, const char *info) { int i; for ( i = 0; caps[i].matches; i++ ) { if ( !caps[i].matches(&caps[i]) ) continue; if ( !cpus_have_cap(caps[i].capability) && caps[i].desc ) printk(XENLOG_INFO "%s: %s\n", info, caps[i].desc); cpus_set_cap(caps[i].capability); } }
/*
 * copy_thread - initialise the saved CPU context of a newly created task.
 *
 * @clone_flags: clone(2) flags controlling TLS setup.
 * @stack_start: new user stack pointer (user threads) or the function
 *               argument passed in x19 (kernel threads).
 * @stk_sz:      second kernel-thread argument, stashed in x20.
 * @p:           the task being created.
 *
 * Both paths point cpu_context.pc at ret_from_fork and cpu_context.sp at
 * the child's pt_regs, so the child resumes there on first schedule.
 * Returns 0 unconditionally.
 */
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);

	/* Start from a clean callee-saved register context. */
	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	if (likely(!(p->flags & PF_KTHREAD))) {
		/* User thread: inherit the parent's register state... */
		*childregs = *current_pt_regs();
		/* ...but zero x0 — presumably the child's syscall return. */
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (stack_start) {
			/* compat (32-bit) tasks use the compat SP slot. */
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone (4th argument), use it
		 * for the new thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.tp_value = childregs->regs[3];
	} else {
		/* Kernel thread: fresh pt_regs, runs at EL1. */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h;
		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
		    cpus_have_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;
		/* x19/x20 carry the thread function and its argument. */
		p->thread.cpu_context.x19 = stack_start;
		p->thread.cpu_context.x20 = stk_sz;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

	/* Clear/copy the hardware breakpoint state for the child. */
	ptrace_hw_copy_thread(p);

	return 0;
}
/*
 * Patch every alternatives entry whose CPU feature is present, copying
 * the replacement instructions over the originals one word at a time.
 * Offsets in struct alt_instr are relative to the offset fields
 * themselves, hence the &alt->..._offset + alt->..._offset arithmetic.
 *
 * @dummy: unused; signature matches the caller's callback convention.
 * Always returns 0.
 */
static int __apply_alternatives(void *dummy)
{
	struct alt_instr *alt;
	u32 *origptr, *replptr, *endptr;

	for (alt = __alt_instructions; alt < __alt_instructions_end; alt++) {
		if (!cpus_have_cap(alt->cpufeature))
			continue;

		/* This variant only supports equal-length replacements. */
		BUG_ON(alt->alt_len != alt->orig_len);

		pr_info_once("patching kernel code\n");

		/* Resolve the self-relative offsets into real pointers. */
		origptr = (void *)&alt->orig_offset + alt->orig_offset;
		endptr = (void *)origptr + alt->orig_len;
		replptr = (void *)&alt->alt_offset + alt->alt_offset;

		/* Patch word by word; BUG on any patching failure. */
		for (; origptr < endptr; origptr++, replptr++)
			BUG_ON(aarch64_insn_patch_text_nosync(origptr, *replptr));
	}

	return 0;
}
/*
 * Warn once — tainting the kernel as out-of-spec — if the GICv3 system
 * register interface capability is set, since this (GICv2) driver does
 * not expect it; the message blames firmware for the misconfiguration.
 */
static void gic_check_cpu_features(void)
{
	WARN_TAINT_ONCE(cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
			TAINT_CPU_OUT_OF_SPEC,
			"GICv3 system registers enabled, broken firmware!\n");
}