/*
 * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
 * return DENIED (which would be fatal).
 */
static void __init psci_init_migrate(void)
{
	unsigned long cpuid;
	int type, cpu = -1;

	type = psci_ops.migrate_info_type();

	if (type == PSCI_0_2_TOS_MP) {
		pr_info("Trusted OS migration not required\n");
		return;
	}

	if (type == PSCI_RET_NOT_SUPPORTED) {
		pr_info("MIGRATE_INFO_TYPE not supported.\n");
		return;
	}

	if (type != PSCI_0_2_TOS_UP_MIGRATE &&
	    type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
		pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
		return;
	}

	cpuid = psci_migrate_info_up_cpu();
	if (cpuid & ~MPIDR_HWID_BITMASK) {
		pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
			cpuid);
		return;
	}

	cpu = get_logical_index(cpuid);
	resident_cpu = cpu >= 0 ? cpu : -1;

	pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
}
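/*
 * For context (not part of the excerpt above): the resident_cpu value set
 * here is later consulted when deciding whether a given CPU may be powered
 * off. A minimal sketch of that consumer, assuming the upstream
 * drivers/firmware/psci.c helper of the same name:
 */
bool psci_tos_resident_on(int cpu)
{
	return cpu == resident_cpu;
}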
int arch_hibernation_header_restore(void *addr)
{
	int ret;
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	/* Refuse to resume an image that was not produced by this kernel. */
	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	/* Map the MPIDR recorded at suspend time back to a logical CPU. */
	sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
	pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}

	/* Resume must run on the CPU that hibernated, so bring it online. */
	if (!cpu_online(sleep_cpu)) {
		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
		ret = cpu_up(sleep_cpu);
		if (ret) {
			pr_err("Failed to bring hibernate-CPU up!\n");
			sleep_cpu = -EINVAL;
			return ret;
		}
	}

	resume_hdr = *hdr;

	return 0;
}
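/*
 * For reference, a sketch of the invariants being compared above. In the
 * upstream arm64 hibernate code the "invariants" are just the kernel's uts
 * version string, so an image written by a different kernel build fails the
 * memcmp(); the exact layout shown here is an assumption based on that code.
 */
struct arch_hibernate_hdr_invariants {
	char	uts_version[__NEW_UTS_LEN + 1];
};

static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}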
void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
					struct cpumask *slow)
{
	struct device_node *cn = NULL;
	int cpu;

	cpumask_clear(fast);
	cpumask_clear(slow);

	/*
	 * Use the config options if they are given. This helps testing
	 * HMP scheduling on systems without a big.LITTLE architecture.
	 */
	if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
		if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
			WARN(1, "Failed to parse HMP fast cpu mask!\n");
		if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
			WARN(1, "Failed to parse HMP slow cpu mask!\n");
		return;
	}

	/*
	 * Else, parse device tree for little cores.
	 */
	while ((cn = of_find_node_by_type(cn, "cpu"))) {
		const u32 *mpidr;
		int len;

		mpidr = of_get_property(cn, "reg", &len);
		if (!mpidr || len != 4) {
			pr_err("* %s missing reg property\n", cn->full_name);
			continue;
		}

		cpu = get_logical_index(be32_to_cpup(mpidr));
		if (cpu == -EINVAL) {
			pr_err("couldn't get logical index for mpidr %x\n",
			       be32_to_cpup(mpidr));
			break;
		}

		if (is_little_cpu(cn))
			cpumask_set_cpu(cpu, slow);
		else
			cpumask_set_cpu(cpu, fast);
	}

	if (!cpumask_empty(fast) && !cpumask_empty(slow))
		return;

	/*
	 * We didn't find both big and little cores so let's call all cores
	 * fast as this will keep the system running, with all cores being
	 * treated equal.
	 */
	cpumask_setall(fast);
	cpumask_clear(slow);
}
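/*
 * All three functions above lean on get_logical_index() to translate a
 * physical MPIDR value into a logical CPU number. A minimal sketch of that
 * helper, assuming the usual arm/arm64 <asm/smp_plat.h> implementation that
 * scans the cpu_logical_map and returns -EINVAL when no CPU matches:
 */
static inline int get_logical_index(u64 mpidr)
{
	int cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		if (cpu_logical_map(cpu) == mpidr)
			return cpu;
	return -EINVAL;
}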