/*
 * Parse one MADT local-APIC entry: record the ACPI-id -> APIC-id mapping
 * for enabled processors and register the CPU (enabled or not).
 */
static int __init acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_table_lapic *lapic = (struct acpi_table_lapic *)header;

	if (BAD_MADT_ENTRY(lapic, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* Record local apic id only when enabled */
	if (lapic->flags.enabled)
		x86_acpiid_to_apicid[lapic->acpi_id] = lapic->id;

	/*
	 * Disabled CPUs are registered as well so they can be counted;
	 * this lets cpus_possible_map be sized accurately for CPU hotplug
	 * instead of preallocating memory for all NR_CPUS.
	 */
	mp_register_lapic(lapic->id,		/* APIC ID */
			  lapic->flags.enabled);	/* Enabled? */

	return 0;
}
/*
 * Parse one MADT interrupt-source-override entry and record the legacy
 * IRQ remapping, honoring the acpi_skip_timer_override quirk.
 */
static int __init acpi_parse_int_src_ovr(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_interrupt_override *ovr;

	ovr = container_of(header, struct acpi_madt_interrupt_override, header);
	if (BAD_MADT_ENTRY(ovr, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* Optionally ignore the BIOS's IRQ0 -> pin 2 timer override. */
	if (acpi_skip_timer_override &&
	    ovr->source_irq == 0 && ovr->global_irq == 2) {
		printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
		return 0;
	}

	mp_override_legacy_irq(ovr->source_irq,
			       ACPI_MADT_GET_POLARITY(ovr->inti_flags),
			       ACPI_MADT_GET_TRIGGER(ovr->inti_flags),
			       ovr->global_irq);

	return 0;
}
/*
 * Parse one MADT platform-interrupt-source entry, program the IOSAPIC
 * routing for it and record the assigned vector and CPEI information.
 */
static int __init acpi_parse_plat_int_src(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_interrupt_source *src;
	int pol, trig, vector;

	src = (struct acpi_madt_interrupt_source *)header;
	if (BAD_MADT_ENTRY(src, end))
		return -EINVAL;

	pol = ((src->inti_flags & ACPI_MADT_POLARITY_MASK) ==
	       ACPI_MADT_POLARITY_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW;
	trig = ((src->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
		ACPI_MADT_TRIGGER_EDGE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL;

	vector = iosapic_register_platform_intr(src->type, src->global_irq,
						src->io_sapic_vector,
						src->eid, src->id, pol, trig);

	platform_intr_list[src->type] = vector;

	/* The CPEI-override flag only exists in MADT revision 2 and later. */
	if (acpi_madt_rev > 1)
		acpi_cpei_override = src->flags & ACPI_MADT_CPEI_OVERRIDE;

	/* Physical id = (id << 8) | eid, kept for later removal checks. */
	acpi_cpei_phys_cpuid = ((src->id << 8) | src->eid) & 0xffff;

	return 0;
}
/*
 * Parse one MADT local-APIC entry (Xen variant): record the ACPI-id ->
 * APIC-id mapping for enabled processors and register the CPU either way.
 */
static int __init acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_local_apic *lapic =
		container_of(header, struct acpi_madt_local_apic, header);
	bool_t online = 0;

	if (BAD_MADT_ENTRY(lapic, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* Record local apic id only when enabled */
	if (lapic->lapic_flags & ACPI_MADT_ENABLED) {
		x86_acpiid_to_apicid[lapic->processor_id] = lapic->id;
		online = 1;
	}

	/*
	 * Disabled CPUs are registered as well so they can be counted;
	 * this lets cpus_possible_map be sized accurately for CPU hotplug
	 * instead of preallocating memory for all NR_CPUS.
	 */
	mp_register_lapic(lapic->id, online, 0);

	return 0;
}
/*
 * Parse one MADT interrupt-source-override entry (legacy struct layout)
 * and record the remapping, honoring the acpi_skip_timer_override quirk.
 */
static int __init acpi_parse_int_src_ovr(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_table_int_src_ovr *ovr =
		(struct acpi_table_int_src_ovr *)header;

	if (BAD_MADT_ENTRY(ovr, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* Optionally ignore the BIOS's IRQ0 -> pin 2 timer override. */
	if (acpi_skip_timer_override &&
	    ovr->bus_irq == 0 && ovr->global_irq == 2) {
		printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
		return 0;
	}

	mp_override_legacy_irq(ovr->bus_irq, ovr->flags.polarity,
			       ovr->flags.trigger, ovr->global_irq);

	return 0;
}
/*
 * Parse one MADT I/O-APIC entry and register it with an irqdomain
 * configuration; IOAPICs covering legacy IRQs get a static legacy domain.
 */
static int __init acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_io_apic *ioapic = (struct acpi_madt_io_apic *)header;
	struct ioapic_domain_cfg cfg = {
		.type = IOAPIC_DOMAIN_DYNAMIC,
		.ops = &mp_ioapic_irqdomain_ops,
	};

	if (BAD_MADT_ENTRY(ioapic, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* Statically assign IRQ numbers for IOAPICs hosting legacy IRQs */
	if (ioapic->global_irq_base < nr_legacy_irqs())
		cfg.type = IOAPIC_DOMAIN_LEGACY;

	mp_register_ioapic(ioapic->id, ioapic->address,
			   ioapic->global_irq_base, &cfg);

	return 0;
}
/*
 * Parse one MADT platform-interrupt-source entry (legacy struct layout),
 * program the IOSAPIC routing for it and record the assigned vector.
 */
static int __init acpi_parse_plat_int_src ( acpi_table_entry_header *header, const unsigned long end)
{
	struct acpi_table_plat_int_src *src =
		(struct acpi_table_plat_int_src *) header;
	int pol, trig, vector;

	if (BAD_MADT_ENTRY(src, end))
		return -EINVAL;

	/*
	 * Get vector assignment for this interrupt, set attributes,
	 * and program the IOSAPIC routing table.
	 */
	pol = (src->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW;
	trig = (src->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL;

	vector = iosapic_register_platform_intr(src->type, src->global_irq,
						src->iosapic_vector,
						src->eid, src->id, pol, trig);

	platform_intr_list[src->type] = vector;

	return 0;
}
/*
 * Parse one MADT local-APIC entry (modern x86 variant) and register the
 * CPU; entries with the reserved APIC id 0xff are skipped.
 */
static int __init acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_local_apic *lapic =
		(struct acpi_madt_local_apic *)header;

	if (BAD_MADT_ENTRY(lapic, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* Ignore invalid ID */
	if (lapic->id == 0xff)
		return 0;

	/*
	 * Disabled CPUs are registered as well so they can be counted;
	 * this lets cpus_possible_map be sized accurately for CPU hotplug
	 * instead of preallocating memory for all NR_CPUS.
	 */
	acpi_register_lapic(lapic->id,		/* APIC ID */
			    lapic->processor_id,	/* ACPI ID */
			    lapic->lapic_flags & ACPI_MADT_ENABLED);

	return 0;
}
/*
 * Parse one MADT local-x2APIC entry. Registers the CPU when the kernel is
 * built with x2APIC support; otherwise the entry is reported and ignored.
 */
static int __init acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_local_x2apic *x2apic =
		(struct acpi_madt_local_x2apic *)header;

	if (BAD_MADT_ENTRY(x2apic, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

#ifdef CONFIG_X86_X2APIC
	/*
	 * Disabled CPUs are registered as well so they can be counted;
	 * this lets cpus_possible_map be sized accurately for CPU hotplug
	 * instead of preallocating memory for all NR_CPUS.
	 */
	acpi_register_lapic(x2apic->local_apic_id,	/* APIC ID */
			    x2apic->lapic_flags & ACPI_MADT_ENABLED);
#else
	printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
#endif

	return 0;
}
/*
 * Parse one MADT I/O-SAPIC entry and hand its MMIO address and global
 * IRQ base to the IOSAPIC driver; returns iosapic_init()'s result.
 */
static int __init acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_io_sapic *sapic = (struct acpi_madt_io_sapic *)header;

	if (BAD_MADT_ENTRY(sapic, end))
		return -EINVAL;

	return iosapic_init(sapic->address, sapic->global_irq_base);
}
/* Validate one MADT NMI-source entry; handling is not implemented yet. */
static int __init acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_nmi_source *src =
		(struct acpi_madt_nmi_source *)header;

	if (BAD_MADT_ENTRY(src, end))
		return -EINVAL;

	/* TBD: Support nmi_src entries */

	return 0;
}
/* Validate one MADT local-APIC-NMI entry; handling is not implemented yet. */
static int __init acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_local_apic_nmi *nmi =
		(struct acpi_madt_local_apic_nmi *)header;

	if (BAD_MADT_ENTRY(nmi, end))
		return -EINVAL;

	/* TBD: Support lapic_nmi entries */

	return 0;
}
/*
 * Parse one MADT GIC-CPU-interface entry and hand it to
 * acpi_map_gic_cpu_interface() for registration.
 */
static int __init acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;

	if (BAD_MADT_ENTRY(gicc, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_map_gic_cpu_interface(gicc);

	return 0;
}
/*
 * Parse one MADT local-APIC-address-override entry and record the
 * overriding local APIC address in acpi_lapic_addr.
 */
static int __init acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_local_apic_override *ovr;

	ovr = container_of(header, struct acpi_madt_local_apic_override, header);
	if (BAD_MADT_ENTRY(ovr, end))
		return -EINVAL;

	acpi_lapic_addr = ovr->address;

	return 0;
}
/*
 * Validate and print one MADT NMI-source entry; handling is not
 * implemented yet.
 */
static int __init acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_nmi_source *src;

	src = container_of(header, struct acpi_madt_nmi_source, header);
	if (BAD_MADT_ENTRY(src, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* TBD: Support nmi_src entries? */

	return 0;
}
/*
 * Parse one MADT local-APIC-address-override entry (legacy struct layout)
 * and record the overriding local APIC address in acpi_lapic_addr.
 */
static int __init acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_table_lapic_addr_ovr *ovr =
		(struct acpi_table_lapic_addr_ovr *)header;

	if (BAD_MADT_ENTRY(ovr, end))
		return -EINVAL;

	acpi_lapic_addr = ovr->address;

	return 0;
}
/*
 * Parse one MADT local-APIC-NMI entry and warn when the NMI is wired to
 * a LINT pin other than LINT1.
 */
static int __init acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_local_apic_nmi *nmi;

	nmi = container_of(header, struct acpi_madt_local_apic_nmi, header);
	if (BAD_MADT_ENTRY(nmi, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	if (nmi->lint != 1)
		printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");

	return 0;
}
/*
 * Parse one MADT I/O-APIC entry and register it (id, MMIO address and
 * global IRQ base) with the MP-table code.
 */
static int __init acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_io_apic *ioapic;

	ioapic = container_of(header, struct acpi_madt_io_apic, header);
	if (BAD_MADT_ENTRY(ioapic, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	mp_register_ioapic(ioapic->id, ioapic->address, ioapic->global_irq_base);

	return 0;
}
/*
 * Parse one MADT interrupt-source-override entry (legacy struct layout)
 * and reprogram the IOSAPIC routing for the affected ISA IRQ.
 */
static int __init acpi_parse_int_src_ovr ( acpi_table_entry_header *header, const unsigned long end)
{
	struct acpi_table_int_src_ovr *ovr =
		(struct acpi_table_int_src_ovr *) header;
	int pol, trig;

	if (BAD_MADT_ENTRY(ovr, end))
		return -EINVAL;

	pol = (ovr->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW;
	trig = (ovr->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL;

	iosapic_override_isa_irq(ovr->bus_irq, ovr->global_irq, pol, trig);

	return 0;
}
/*
 * Parse and print one MADT local-APIC-address-override entry, then record
 * the overriding local APIC address in acpi_lapic_addr.
 */
static int __init acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_local_apic_override *ovr =
		(struct acpi_madt_local_apic_override *)header;

	if (BAD_MADT_ENTRY(ovr, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_lapic_addr = ovr->address;

	return 0;
}
/*
 * Parse one MADT local-APIC-address-override entry (ia64 variant) and,
 * when a non-zero address is supplied, remap ipi_base_addr to it.
 */
static int __init acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_local_apic_override *ovr =
		(struct acpi_madt_local_apic_override *)header;

	if (BAD_MADT_ENTRY(ovr, end))
		return -EINVAL;

	if (ovr->address) {
		iounmap(ipi_base_addr);
		ipi_base_addr = ioremap(ovr->address, 0);
	}

	return 0;
}
/*
 * Parse one MADT local-SAPIC entry and register the CPU; the physical
 * APIC id is composed from the entry's id and eid fields.
 */
static int __init acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_local_sapic *sapic =
		(struct acpi_madt_local_sapic *)header;

	if (BAD_MADT_ENTRY(sapic, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_register_lapic((sapic->id << 8) | sapic->eid,	/* APIC ID */
			    sapic->lapic_flags & ACPI_MADT_ENABLED);

	return 0;
}
/*
 * Parse one MADT interrupt-source-override entry.
 *
 * The SCI gets its dedicated setup path; for every other source the
 * override is recorded in the MP tables via mp_override_legacy_irq(),
 * honoring the acpi_skip_timer_override quirk for the IRQ0 -> pin 2
 * timer override.
 *
 * NOTE(review): the original body was truncated — it handled only the
 * SCI case and was missing the function's closing brace, the timer-
 * override quirk and the mp_override_legacy_irq() call, so non-SCI
 * override entries were silently dropped (and the file could not
 * compile). Restored the complete logic.
 */
static int __init acpi_parse_int_src_ovr(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_interrupt_override *intsrc = NULL;

	intsrc = (struct acpi_madt_interrupt_override *)header;

	if (BAD_MADT_ENTRY(intsrc, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* The SCI has its own setup routine. */
	if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
		acpi_sci_ioapic_setup(intsrc->global_irq,
				      intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
				      (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
		return 0;
	}

	/* Optionally ignore the BIOS's IRQ0 -> pin 2 timer override. */
	if (acpi_skip_timer_override &&
	    intsrc->source_irq == 0 && intsrc->global_irq == 2) {
		printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
		return 0;
	}

	mp_override_legacy_irq(intsrc->source_irq,
			       intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
			       (intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2,
			       intsrc->global_irq);

	return 0;
}
/*
 * Parse one MADT local-x2APIC entry (Xen variant): entries whose APIC or
 * ACPI id exceed the static table limits are reported and skipped; for
 * the rest, the ACPI-id -> APIC-id mapping is recorded when enabled and
 * the CPU is registered either way.
 */
static int __init acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_local_x2apic *processor =
		container_of(header, struct acpi_madt_local_x2apic, header);
	bool_t enabled = 0;

	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	/* Record local apic id only when enabled and fitting. */
	if (processor->local_apic_id >= MAX_APICS ||
	    processor->uid >= MAX_MADT_ENTRIES) {
		/* Log level depends on whether the skipped CPU was enabled. */
		printk("%sAPIC ID %#x and/or ACPI ID %#x beyond limit"
		       " - processor ignored\n",
		       processor->lapic_flags & ACPI_MADT_ENABLED ?
				KERN_WARNING "WARNING: " : KERN_INFO,
		       processor->local_apic_id, processor->uid);
		/*
		 * Must not return an error here, to prevent
		 * acpi_table_parse_entries() from terminating early.
		 */
		return 0 /* -ENOSPC */;
	}

	if (processor->lapic_flags & ACPI_MADT_ENABLED) {
		x86_acpiid_to_apicid[processor->uid] = processor->local_apic_id;
		enabled = 1;
	}

	/*
	 * We need to register disabled CPU as well to permit
	 * counting disabled CPUs. This allows us to size
	 * cpus_possible_map more accurately, to permit
	 * to not preallocating memory for all NR_CPUS
	 * when we use CPU hotplug.
	 */
	mp_register_lapic(processor->local_apic_id, enabled, 0);

	return 0;
}
/*
 * Parse one MADT interrupt-source-override entry (ia64 variant) and
 * reprogram the IOSAPIC routing for the affected ISA IRQ.
 */
static int __init acpi_parse_int_src_ovr(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_interrupt_override *ovr =
		(struct acpi_madt_interrupt_override *)header;
	int pol, trig;

	if (BAD_MADT_ENTRY(ovr, end))
		return -EINVAL;

	pol = ((ovr->inti_flags & ACPI_MADT_POLARITY_MASK) ==
	       ACPI_MADT_POLARITY_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW;
	trig = ((ovr->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
		ACPI_MADT_TRIGGER_EDGE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL;

	iosapic_override_isa_irq(ovr->source_irq, ovr->global_irq, pol, trig);

	return 0;
}
/*
 * Parse one MADT local-SAPIC entry: count every CPU, and for enabled
 * ones record the ACPI-id -> SAPIC-id mapping (and the SMP boot id).
 */
static int __init acpi_parse_lsapic (acpi_table_entry_header *header, const unsigned long end)
{
	struct acpi_table_lsapic *lsapic =
		(struct acpi_table_lsapic *) header;

	if (BAD_MADT_ENTRY(lsapic, end))
		return -EINVAL;

	if (lsapic->flags.enabled) {
		/* Physical SAPIC id = (id << 8) | eid. */
		unsigned int sapicid = (lsapic->id << 8) | lsapic->eid;

#ifdef CONFIG_SMP
		smp_boot_data.cpu_phys_id[available_cpus] = sapicid;
#endif
		ia64_acpiid_to_sapicid[lsapic->acpi_id] = sapicid;
		++available_cpus;
	}

	total_cpus++;

	return 0;
}
/*
 * Parse one MADT platform-interrupt-source entry (legacy struct layout),
 * program the IOSAPIC routing for it and record the assigned vector and
 * CPEI information.
 */
static int __init acpi_parse_plat_int_src(acpi_table_entry_header * header, const unsigned long end)
{
	struct acpi_table_plat_int_src *src =
		(struct acpi_table_plat_int_src *)header;
	int pol, trig, vector;

	if (BAD_MADT_ENTRY(src, end))
		return -EINVAL;

	/*
	 * Get vector assignment for this interrupt, set attributes,
	 * and program the IOSAPIC routing table.
	 */
	pol = (src->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW;
	trig = (src->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL;

	vector = iosapic_register_platform_intr(src->type, src->global_irq,
						src->iosapic_vector,
						src->eid, src->id, pol, trig);

	platform_intr_list[src->type] = vector;

	/* The CPEI-override flag only exists in MADT revision 2 and later. */
	if (acpi_madt_rev > 1)
		acpi_cpei_override = src->plint_flags.cpei_override_flag;

	/* Save the physical id, so we can check when it is being removed. */
	acpi_cpei_phys_cpuid = ((src->id << 8) | src->eid) & 0xffff;

	return 0;
}
/*
 * Parse one MADT platform-interrupt-source entry (ia64 variant), program
 * the IOSAPIC routing for it and record the assigned vector and CPEI
 * information.
 */
static int __init acpi_parse_plat_int_src(struct acpi_subtable_header * header, const unsigned long end)
{
	struct acpi_madt_interrupt_source *src =
		(struct acpi_madt_interrupt_source *)header;
	int pol, trig, vector;

	if (BAD_MADT_ENTRY(src, end))
		return -EINVAL;

	/*
	 * Get vector assignment for this interrupt, set attributes,
	 * and program the IOSAPIC routing table.
	 */
	pol = ((src->inti_flags & ACPI_MADT_POLARITY_MASK) ==
	       ACPI_MADT_POLARITY_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW;
	trig = ((src->inti_flags & ACPI_MADT_TRIGGER_MASK) ==
		ACPI_MADT_TRIGGER_EDGE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL;

	vector = iosapic_register_platform_intr(src->type, src->global_irq,
						src->io_sapic_vector,
						src->eid, src->id, pol, trig);

	platform_intr_list[src->type] = vector;

	/* The CPEI-override flag only exists in MADT revision 2 and later. */
	if (acpi_madt_rev > 1)
		acpi_cpei_override = src->flags & ACPI_MADT_CPEI_OVERRIDE;

	/* Save the physical id, so we can check when it is being removed. */
	acpi_cpei_phys_cpuid = ((src->id << 8) | src->eid) & 0xffff;

	return 0;
}