void __init arch_livepatch_init(void)
{
    void *start, *end;

    start = (void *)LIVEPATCH_VMAP_START;
    end = (void *)LIVEPATCH_VMAP_END;

    vm_init_type(VMAP_XEN, start, end);

    cpus_set_cap(LIVEPATCH_FEATURE);
}
void check_local_cpu_errata(void)
{
        struct arm64_cpu_capabilities *cpus = arm64_errata;
        int i;

        for (i = 0; cpus[i].desc; i++) {
                if (!cpus[i].is_affected(&cpus[i]))
                        continue;

                if (!cpus_have_cap(cpus[i].capability))
                        pr_info("enabling workaround for %s\n", cpus[i].desc);
                cpus_set_cap(cpus[i].capability);
        }
}
void update_cpu_capabilities(const struct arm_cpu_capabilities *caps,
                             const char *info)
{
    int i;

    for ( i = 0; caps[i].matches; i++ )
    {
        if ( !caps[i].matches(&caps[i]) )
            continue;

        if ( !cpus_have_cap(caps[i].capability) && caps[i].desc )
            printk(XENLOG_INFO "%s: %s\n", info, caps[i].desc);

        cpus_set_cap(caps[i].capability);
    }
}
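For illustration, here is a minimal sketch of a table that could be passed to update_cpu_capabilities(). The feature constant and match predicate below are hypothetical names, not symbols from the Xen tree; the only thing taken from the code above is the field usage and the fact that the walk stops at the first entry whose .matches pointer is NULL, so the table ends with an empty sentinel.

/* Hypothetical example; EXAMPLE_FEATURE and has_example_feature() are
 * illustrative names only, not real Xen symbols. */
static bool has_example_feature(const struct arm_cpu_capabilities *entry)
{
    /* A real predicate would probe ID registers or errata state here. */
    return true;
}

static const struct arm_cpu_capabilities example_caps[] = {
    {
        .desc = "hypothetical example feature",
        .capability = EXAMPLE_FEATURE,
        .matches = has_example_feature,
    },
    { /* sentinel: .matches == NULL terminates the loop above */ },
};

/* e.g. update_cpu_capabilities(example_caps, "enabled feature"); */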
void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
                            const char *info)
{
        int i;

        for (i = 0; caps[i].desc; i++) {
                if (!caps[i].matches(&caps[i]))
                        continue;

                if (!cpus_have_cap(caps[i].capability))
                        pr_info("%s %s\n", info, caps[i].desc);
                cpus_set_cap(caps[i].capability);
        }

        /* second pass allows enable() to consider interacting capabilities */
        for (i = 0; caps[i].desc; i++) {
                if (cpus_have_cap(caps[i].capability) && caps[i].enable)
                        caps[i].enable();
        }
}
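As a sketch of why the second pass exists: enable() hooks run only after every entry has been matched and recorded, so a hook can consult cpus_have_cap() for other capabilities. The entry below is hypothetical, with field types inferred from the calls in the function above rather than from a specific kernel version.

/* Hypothetical entry; EXAMPLE_CAP, example_matches() and example_enable()
 * are illustrative names, not real kernel symbols. */
static bool example_matches(const struct arm64_cpu_capabilities *entry)
{
        return true;    /* a real check would read CPU ID registers */
}

static void example_enable(void)
{
        /* Runs in the second pass, after all capabilities are recorded,
         * so cpus_have_cap() reflects the full set at this point. */
}

static const struct arm64_cpu_capabilities example_caps[] = {
        {
                .desc = "hypothetical example capability",
                .capability = EXAMPLE_CAP,
                .matches = example_matches,
                .enable = example_enable,
        },
        { /* sentinel: .desc == NULL ends both passes above */ },
};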