/*
 * Map a CPU to its NUMA node via the early per-cpu physical APIC id.
 * Returns NUMA_NO_NODE when no valid APIC id is recorded for @cpu.
 */
int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid == BAD_APICID)
		return NUMA_NO_NODE;

	return __apicid_to_node[apicid];
}
/*
 * Probe for the bigsmp APIC mode; when it applies, install the bigsmp
 * driver and renumber every possible CPU's early logical APIC id using
 * the bigsmp scheme (entries still at BAD_APICID are left untouched).
 */
void __init generic_bigsmp_probe(void)
{
	unsigned int cpu;

	if (!probe_bigsmp())
		return;

	apic = &apic_bigsmp;

	for_each_possible_cpu(cpu) {
		if (early_per_cpu(x86_cpu_to_logical_apicid, cpu) != BAD_APICID)
			early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
				bigsmp_early_logical_apicid(cpu);
	}

	pr_info("Overriding APIC driver with %s\n", apic_bigsmp.name);
}
static int summit_early_logical_apicid(int cpu) { int count = 0; u8 my_id = early_per_cpu(x86_cpu_to_apicid, cpu); u8 my_cluster = APIC_CLUSTER(my_id); #ifdef CONFIG_SMP u8 lid; int i; for (count = 0, i = nr_cpu_ids; --i >= 0; ) { lid = early_per_cpu(x86_cpu_to_logical_apicid, i); if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster) ++count; } #endif BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); return my_cluster | (1UL << count); }
/*
 * Program this CPU's local APIC for summit logical addressing:
 * set the DFR to the summit value, then rewrite the LDR's logical-id
 * field with the precomputed early logical APIC id.
 */
static void summit_init_apic_ldr(void)
{
	unsigned long logical_id;
	unsigned long ldr;

	logical_id = early_per_cpu(x86_cpu_to_logical_apicid,
				   smp_processor_id());

	apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);

	ldr = apic_read(APIC_LDR);
	ldr &= ~APIC_LDR_MASK;
	ldr |= SET_APIC_LOGICAL_ID(logical_id);
	apic_write(APIC_LDR, ldr);
}
static int summit_early_logical_apicid(int cpu) { int count = 0; u8 my_id = early_per_cpu(x86_cpu_to_apicid, cpu); u8 my_cluster = APIC_CLUSTER(my_id); #ifdef CONFIG_SMP u8 lid; int i; /* Create logical APIC IDs by counting CPUs already in cluster. */ for (count = 0, i = nr_cpu_ids; --i >= 0; ) { lid = early_per_cpu(x86_cpu_to_logical_apicid, i); if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster) ++count; } #endif /* We only have a 4 wide bitmap in cluster mode. If a deranged * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); return my_cluster | (1UL << count); }
/*
 * summit_cpu_mask_to_apicid - fold a cpumask into one logical APIC id.
 *
 * ORs together the early logical APIC ids of every CPU in @cpumask.
 * All CPUs must belong to the same APIC cluster; a mixed-cluster mask
 * is rejected with BAD_APICID.
 *
 * NOTE(review): this block appears truncated in this chunk — the
 * function's final return and closing brace after the loop are not
 * visible here; confirm against the full file.
 */
static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	unsigned int round = 0;
	int cpu, apicid = 0;

	for_each_cpu(cpu, cpumask) {
		int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);

		/* From the second CPU on, every id must share the cluster. */
		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
			printk("%s: Not a valid mask!\n", __func__);
			return BAD_APICID;
		}
		apicid |= new_apicid;
		round++;
	}
/*
 * summit_cpu_mask_to_apicid - fold the online CPUs of @cpumask into one
 * logical APIC id, returned through @dest_id.
 *
 * ORs together the early logical APIC ids of every online CPU in the
 * mask; returns -EINVAL if the CPUs span more than one APIC cluster.
 *
 * NOTE(review): block appears truncated in this chunk — the store to
 * *dest_id and the success return after the loop are not visible here.
 */
static inline int summit_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
{
	unsigned int round = 0;
	unsigned int cpu, apicid = 0;

	/*
	 * The cpus in the mask must all be on the apic cluster.
	 */
	for_each_cpu_and(cpu, cpumask, cpu_online_mask) {
		int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);

		/* From the second CPU on, every id must share the cluster. */
		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
			pr_err("Not a valid mask!\n");
			return -EINVAL;
		}
		apicid |= new_apicid;
		round++;
	}
/*
 * es7000_cpu_mask_to_apicid - pick the logical APIC id for a cpumask.
 *
 * Walks @cpumask tracking the most recent CPU's early logical APIC id
 * (plain assignment, unlike the summit variant's OR), while verifying
 * that every CPU sits in the same APIC cluster; a mixed-cluster mask
 * triggers a WARN and returns BAD_APICID.
 *
 * NOTE(review): block appears truncated in this chunk — the return
 * after the loop and the closing brace are not visible here.
 */
static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	unsigned int round = 0;
	int cpu, uninitialized_var(apicid);

	/*
	 * The cpus in the mask must all be on the apic cluster.
	 */
	for_each_cpu(cpu, cpumask) {
		int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);

		/* From the second CPU on, clusters must match. */
		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
			WARN(1, "Not a valid mask!");
			return BAD_APICID;
		}
		apicid = new_apicid;
		round++;
	}
/**
 * acpi_register_lapic - register a local apic and generates a logic cpu number
 * @id: local apic id to register
 * @acpiid: ACPI id to register
 * @enabled: this cpu is enabled or not
 *
 * Returns the logic cpu number which maps to the local apic
 */
static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
{
	unsigned int ver;
	int cpu;

	/* Out-of-range APIC ids cannot be registered. */
	if (id >= MAX_LOCAL_APIC) {
		printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
		return -EINVAL;
	}

	/* Only trust the APIC version once the boot CPU's id is known. */
	ver = (boot_cpu_physical_apicid != -1U) ? boot_cpu_apic_version : 0;

	cpu = __generic_processor_info(id, ver, enabled);
	if (cpu < 0)
		return cpu;

	/* Remember the ACPI id so the CPU can be matched later. */
	early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
	return cpu;
}
/* ES7000 identity-maps logical ids onto the BIOS-reported physical id. */
static int es7000_early_logical_apicid(int cpu)
{
	return early_per_cpu(x86_bios_cpu_apicid, cpu);
}
/* Bigsmp identity-maps logical ids onto the physical APIC id. */
static int bigsmp_early_logical_apicid(int cpu)
{
	return early_per_cpu(x86_cpu_to_apicid, cpu);
}
/*
 * generic_processor_info - account for one enumerated processor.
 *
 * Records @apicid/@version in the early tables, allocates a logical CPU
 * number for it (the boot CPU is always CPU 0), and marks it possible
 * and present.  Processors beyond nr_cpu_ids are counted as disabled
 * and ignored.  On 32-bit, flips def_to_bigsmp when the topology
 * outgrows the default APIC driver's limits.
 */
void __cpuinit generic_processor_info(int apicid, int version)
{
	int cpu;

	if (version == 0x0) {
		/*
		 * BUGFIX: the format expects the CPU identifier, but the old
		 * code passed 'version' (known to be 0 here), so the message
		 * always read "CPU#0".  Print the apicid instead.
		 */
		pr_warning("BIOS bug, APIC version is 0 for CPU#%d! "
			   "fixing up to 0x10. (tell your hw vendor)\n",
				apicid);
		version = 0x10;
	}
	apic_version[apicid] = version;

	if (num_processors >= nr_cpu_ids) {
		int max = nr_cpu_ids;
		int thiscpu = max + disabled_cpus;

		pr_warning(
			"ACPI: NR_CPUS/possible_cpus limit of %i reached."
			"  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);

		disabled_cpus++;
		return;
	}

	num_processors++;
	cpu = cpumask_next_zero(-1, cpu_present_mask);

	if (version != apic_version[boot_cpu_physical_apicid])
		WARN_ONCE(1,
			"ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
			apic_version[boot_cpu_physical_apicid], cpu, version);

	physid_set(apicid, phys_cpu_present_map);
	if (apicid == boot_cpu_physical_apicid) {
		/* The boot CPU is always logical CPU 0. */
		cpu = 0;
	}
	if (apicid > max_physical_apicid)
		max_physical_apicid = apicid;

#ifdef CONFIG_X86_32
	/*
	 * Fall back to bigsmp when the default APIC driver cannot address
	 * the topology: Intel beyond 8 processors, AMD beyond apicid 7.
	 */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		if (num_processors > 8)
			def_to_bigsmp = 1;
		break;
	case X86_VENDOR_AMD:
		if (max_physical_apicid >= 8)
			def_to_bigsmp = 1;
	}
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
	early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
#endif

	set_cpu_possible(cpu, true);
	set_cpu_present(cpu, true);
}