int apic_cpu_init(struct per_cpu *cpu_data) { unsigned int apic_id = phys_processor_id(); unsigned int cpu_id = cpu_data->cpu_id; u32 ldr; printk("(APIC ID %d) ", apic_id); if (apic_id > APIC_MAX_PHYS_ID || cpu_id == CPU_ID_INVALID) return -ERANGE; if (apic_to_cpu_id[apic_id] != CPU_ID_INVALID) return -EBUSY; /* only flat mode with LDR corresponding to logical ID supported */ if (!using_x2apic) { ldr = apic_ops.read(APIC_REG_LDR); if (apic_ops.read(APIC_REG_DFR) != 0xffffffff || (ldr != 0 && ldr != 1UL << (cpu_id + XAPIC_DEST_SHIFT))) return -EIO; } apic_to_cpu_id[apic_id] = cpu_id; cpu_data->apic_id = apic_id; cpu_data->sipi_vector = -1; return 0; }
/**
 * Write a NUL-terminated string to the debug UART.
 *
 * Busy-waits for the transmitter holding register to drain before each
 * character. While a panic is in progress, CPUs other than the panicking
 * one abort their output so the panic message is not interleaved.
 *
 * @param msg  NUL-terminated string to emit.
 */
void arch_dbg_write(const char *msg)
{
	char c;

	for (c = *msg++; c; c = *msg++) {
		/* wait until the UART can accept another character */
		while (!(inb(UART_BASE + UART_LSR) & UART_LSR_THRE))
			cpu_relax();
		/* during a panic, only the panicking CPU may print */
		if (panic_in_progress && panic_cpu != phys_processor_id())
			break;
		outb(c, UART_BASE + UART_TX);
	}
}
int apic_cpu_init(struct per_cpu *cpu_data) { unsigned int xlc = MAX((apic_ext_features() >> 16) & 0xff, APIC_REG_XLVT3 - APIC_REG_XLVT0 + 1); unsigned int apic_id = phys_processor_id(); unsigned int cpu_id = cpu_data->cpu_id; unsigned int n; u32 ldr; printk("(APIC ID %d) ", apic_id); if (apic_id > APIC_MAX_PHYS_ID || cpu_id == CPU_ID_INVALID) return trace_error(-ERANGE); if (apic_to_cpu_id[apic_id] != CPU_ID_INVALID) return trace_error(-EBUSY); /* only flat mode with LDR corresponding to logical ID supported */ if (!using_x2apic) { ldr = apic_ops.read(APIC_REG_LDR); if (apic_ops.read(APIC_REG_DFR) != 0xffffffff || (ldr != 0 && ldr != 1UL << (cpu_id + XAPIC_DEST_SHIFT))) return trace_error(-EIO); } apic_to_cpu_id[apic_id] = cpu_id; cpu_data->apic_id = apic_id; cpu_data->sipi_vector = -1; /* * Extended APIC Register Space (currently, AMD thus xAPIC only). * * Can't do it in apic_init(), as apic_ext_features() accesses * the APIC page that is only accessible after switching to * hv_paging_structs. */ for (n = 0; n < xlc; n++) apic_reserved_bits[APIC_REG_XLVT0 + n] = 0xfffef800; return 0; }