/*
 * Per-CPU init hook backed by the ARM Snoop Control Unit (SCU).
 *
 * Lazily maps the SCU registers (shared, done once), records the CPU's
 * optional clear/release mailbox addresses from its DT node, and then
 * verifies that the SCU actually knows about this core and reports it
 * as SMP-capable.
 *
 * @node: DT node of the CPU being initialized
 * @cpu:  logical CPU index (indexes clear_addr[]/release_addr[])
 *
 * Returns VMM_OK on success, VMM_ENODEV/VMM_ENOSYS or a regmap error
 * code on failure.
 */
static int __init scu_cpu_init(struct vmm_devtree_node *node,
			       unsigned int cpu)
{
	int err;
	physical_addr_t addr;
	struct vmm_devtree_node *snode;

	/* Map the SCU registers on first use; later CPUs reuse the mapping. */
	if (!scu_base) {
		snode = vmm_devtree_find_matching(NULL, scu_matches);
		if (!snode) {
			return VMM_ENODEV;
		}
		err = vmm_devtree_regmap(snode, &scu_base, 0);
		vmm_devtree_dref_node(snode);
		if (err) {
			return err;
		}
	}

	/* Record the per-CPU clear address; absent attribute means 0x0. */
	err = vmm_devtree_read_physaddr(node,
				VMM_DEVTREE_CPU_CLEAR_ADDR_ATTR_NAME, &addr);
	clear_addr[cpu] = err ? 0x0 : addr;

	/* Record the per-CPU release address; absent attribute means 0x0. */
	err = vmm_devtree_read_physaddr(node,
				VMM_DEVTREE_CPU_RELEASE_ADDR_ATTR_NAME, &addr);
	release_addr[cpu] = err ? 0x0 : addr;

	/* Reject CPUs beyond the core count the SCU reports. */
	if (scu_get_core_count((void *)scu_base) <= cpu) {
		return VMM_ENOSYS;
	}

	/* Reject cores the SCU does not flag as SMP-capable. */
	if (!scu_cpu_core_is_smp((void *)scu_base, cpu)) {
		return VMM_ENOSYS;
	}

	return VMM_OK;
}
/*
 * NOTE(review): fragment of a larger function — the locals dn, cpus,
 * str, rc, hwid and cpus_count are declared outside this view.
 *
 * Scans the children of the "cpus" DT node for the boot CPU: the first
 * node with device_type == "cpu" whose "reg" either matches the running
 * core's MPIDR affinity bits, or is accepted unconditionally when fewer
 * than two CPU nodes exist, becomes logical CPU 0.
 */
vmm_devtree_for_each_child(dn, cpus) {
	str = NULL;
	/* Only nodes with device_type == "cpu" describe CPUs. */
	rc = vmm_devtree_read_string(dn,
				     VMM_DEVTREE_DEVICE_TYPE_ATTR_NAME, &str);
	if (rc || !str) {
		continue;
	}
	if (strcmp(str, VMM_DEVTREE_DEVICE_TYPE_VAL_CPU)) {
		continue;
	}
	/*
	 * Read the hardware id from "reg".  With a single CPU node the
	 * MPIDR comparison is skipped — that lone node must be the boot
	 * CPU; otherwise match against this core's affinity bits.
	 */
	rc = vmm_devtree_read_physaddr(dn,
				       VMM_DEVTREE_REG_ATTR_NAME, &hwid);
	if ((rc == VMM_OK) &&
	    ((cpus_count < 2) ||
	     (hwid == (read_mpidr() & MPIDR_HWID_BITMASK)))) {
		smp_logical_map(0) = hwid;
		break;
	}
}
/*
 * arch_smp_init_cpus() - build smp_logical_map[] from the DT "cpus" node.
 *
 * The first child of "/cpus" is taken as the boot CPU and assigned
 * logical id 0; every other CPU node gets the next free logical id,
 * subject to validation (reg present, affinity-only bits, no duplicate
 * MPIDRs, working smp ops and cpu_init).
 *
 * NOTE(review): the definition continues past this view — the final
 * bootcpu_valid validation and the function's return are not visible
 * here.
 */
int __init arch_smp_init_cpus(void)
{
	int rc;
	unsigned int i, cpu = 1;	/* logical id 0 is reserved for the boot CPU */
	bool bootcpu_valid = false;
	struct vmm_devtree_node *dn, *cpus;

	/* Locate the top-level "/cpus" DT node. */
	cpus = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING "cpus");
	if (!cpus) {
		vmm_printf("%s: Failed to find cpus node\n", __func__);
		return VMM_ENOTAVAIL;
	}

	/* Take the first child of "/cpus" as the boot CPU node. */
	dn = NULL;
	vmm_devtree_for_each_child(dn, cpus) {
		break;
	}
	if (!dn) {
		vmm_printf("%s: Failed to find node for boot cpu\n",
			   __func__);
		vmm_devtree_dref_node(cpus);
		return VMM_ENODEV;
	}

	/* The boot CPU's "reg" becomes logical map entry 0. */
	rc = vmm_devtree_read_physaddr(dn, VMM_DEVTREE_REG_ATTR_NAME,
				       &smp_logical_map(0));
	if (rc) {
		vmm_printf("%s: Failed to find reg property for boot cpu\n",
			   __func__);
		vmm_devtree_dref_node(dn);
		vmm_devtree_dref_node(cpus);
		return rc;
	}

	/* Resolve SMP operations for the boot CPU (logical id 0). */
	smp_read_ops(dn, 0);

	vmm_devtree_dref_node(dn);

	/* Walk all CPU nodes to populate the remaining logical ids. */
	dn = NULL;
	vmm_devtree_for_each_child(dn, cpus) {
		physical_addr_t hwid;

		/*
		 * A cpu node with missing "reg" property is
		 * considered invalid to build a smp_logical_map
		 * entry.
		 */
		rc = vmm_devtree_read_physaddr(dn, VMM_DEVTREE_REG_ATTR_NAME,
					       &hwid);
		if (rc) {
			vmm_printf("%s: missing reg property\n", dn->name);
			goto next;
		}

		/*
		 * Non affinity bits must be set to 0 in the DT
		 */
		if (hwid & ~MPIDR_HWID_BITMASK) {
			vmm_printf("%s: invalid reg property\n", dn->name);
			goto next;
		}

		/*
		 * Duplicate MPIDRs are a recipe for disaster. Scan
		 * all initialized entries and check for
		 * duplicates. If any is found just ignore the cpu.
		 * smp_logical_map was initialized to MPIDR_INVALID to
		 * avoid matching valid MPIDR values.
		 */
		for (i = 1; (i < cpu) && (i < CONFIG_CPU_COUNT); i++) {
			if (smp_logical_map(i) == hwid) {
				vmm_printf("%s: duplicate cpu reg properties"
					   " in the DT\n", dn->name);
				goto next;
			}
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == smp_logical_map(0)) {
			if (bootcpu_valid) {
				vmm_printf("%s: duplicate boot cpu reg property"
					   " in DT\n", dn->name);
				goto next;
			}

			bootcpu_valid = TRUE;

			/*
			 * smp_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		/* Ignore CPU nodes beyond the configured CPU limit. */
		if (cpu >= CONFIG_CPU_COUNT)
			goto next;

		/* Skip CPUs without usable SMP operations. */
		if (smp_read_ops(dn, cpu) != 0)
			goto next;

		/* Skip CPUs whose per-CPU init hook fails. */
		if (smp_cpu_ops[cpu]->cpu_init(dn, cpu))
			goto next;

		/*
		 * NOTE(review): "CPU%0" looks like a broken conversion
		 * spec (probably meant "CPU%d"), and %llx may mismatch
		 * physical_addr_t — confirm and fix separately.
		 */
		DPRINTF("%s: smp logical map CPU%0 -> HWID 0x%llx\n",
			__func__, cpu, hwid);
		smp_logical_map(cpu) = hwid;
next:
		cpu++;
	}