void __init update_e820(void)
{
	u32 nr_map;

	nr_map = e820.nr_map;
	if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
		return;
	e820.nr_map = nr_map;
	printk(KERN_INFO "modified physical RAM map:\n");
	e820_print_map("modified");
}
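/*
 * Usage sketch for update_e820() (illustrative, not part of this file):
 * callers first edit the global e820 table, then call update_e820() to
 * re-sanitize and reprint it. The helper e820_update_range() used below
 * is an assumption about this tree; older trees use add_memory_region()
 * instead, so adjust the name to match your sources.
 */
static void __init mark_region_reserved(u64 start, u64 size)
{
	/* flip the range from usable RAM to reserved ... */
	e820_update_range(start, size, E820_RAM, E820_RESERVED);
	/* ... then fold overlaps away and log the modified map */
	update_e820();
}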
/**
 * Because of the size limitation of struct boot_params, only the first
 * 128 E820 memory entries are passed to the kernel via
 * boot_params.e820_map; the others are passed via the SETUP_E820_EXT
 * node of the linked list of struct setup_data, which is parsed here.
 */
void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
{
	u32 map_len;
	int entries;
	struct e820entry *extmap;

	entries = sdata->len / sizeof(struct e820entry);
	map_len = sdata->len + sizeof(struct setup_data);
	if (map_len > PAGE_SIZE)
		sdata = early_ioremap(pa_data, map_len);
	extmap = (struct e820entry *)(sdata->data);
	__append_e820_map(extmap, entries);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	if (map_len > PAGE_SIZE)
		early_iounmap(sdata, map_len);
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("extended");
}
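/*
 * Sketch of the caller's side (normally parse_setup_data() in
 * arch/x86/kernel/setup.c, not shown in this excerpt): the boot loader
 * chains struct setup_data nodes off boot_params.hdr.setup_data, and
 * each SETUP_E820_EXT node is handed to parse_e820_ext() above. Shown
 * only to make the pa_data/early_ioremap() contract visible; the exact
 * shape varies by tree.
 */
static void __init walk_setup_data(void)
{
	u64 pa_data = boot_params.hdr.setup_data;

	while (pa_data) {
		struct setup_data *data = early_ioremap(pa_data, PAGE_SIZE);

		if (data->type == SETUP_E820_EXT)
			parse_e820_ext(data, pa_data);
		pa_data = data->next;	/* physical address of next node */
		early_iounmap(data, PAGE_SIZE);
	}
}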
/*
 * Sanitize the e820 table from the BIOS, then copy it to a safe place:
 * e820_bios. If the BIOS hands us an empty e820 table, we do not fake
 * one; we just panic.
 */
void __init setup_memory_map(void)
{
	u32 new_nr;

	new_nr = boot_params.e820_nr_entries;
	sanitize_e820_map(boot_params.e820_map, ARRAY_SIZE(boot_params.e820_map),
			  &new_nr);
	boot_params.e820_nr_entries = new_nr;
	if (append_e820_map(boot_params.e820_map, boot_params.e820_nr_entries) < 0)
		panic("e820: BIOS provides us *nothing*");

	memcpy(&e820_bios, &e820, sizeof(struct e820map));

	printk(KERN_INFO "e820: BIOS-provided physical RAM map:\n");
	e820_print_map("BIOS-e820");
}
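/*
 * For reference, the structures the code above manipulates. This
 * matches the classic <asm/e820.h> layout (E820MAX is 128, hence the
 * 128-entry limit noted above), but check your tree's header rather
 * than relying on this sketch.
 */
struct e820entry {
	__u64 addr;	/* start of memory segment */
	__u64 size;	/* size of memory segment */
	__u32 type;	/* E820_RAM, E820_RESERVED, E820_ACPI, ... */
} __attribute__((packed));

struct e820map {
	__u32 nr_map;
	struct e820entry map[E820MAX];
};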
static __init void parse_cmdline_early(char **cmdline_p)
{
	char c = ' ', *to = command_line, *from = COMMAND_LINE;
	int len = 0;
	int userdef = 0;

	for (;;) {
		if (c != ' ')
			goto next_char;

#ifdef CONFIG_SMP
		/*
		 * If the BIOS enumerates physical processors before logical,
		 * maxcpus=N at enumeration-time can be used to disable HT.
		 */
		if (!memcmp(from, "maxcpus=", 8)) {
			extern unsigned int maxcpus;

			maxcpus = simple_strtoul(from + 8, NULL, 0);
		}
#endif
#ifdef CONFIG_ACPI
		/* "acpi=off" disables both ACPI table parsing and interpreter init */
		if (fullarg(from, "acpi=off"))
			disable_acpi();

		if (fullarg(from, "acpi=force")) {
			/* add later when we do DMI horrors: */
			acpi_force = 1;
			acpi_disabled = 0;
		}

		/*
		 * acpi=ht just means: do ACPI MADT parsing at bootup,
		 * but don't enable the full ACPI interpreter
		 */
		if (fullarg(from, "acpi=ht")) {
			if (!acpi_force)
				disable_acpi();
			acpi_ht = 1;
		} else if (fullarg(from, "pci=noacpi"))
			acpi_disable_pci();
		else if (fullarg(from, "acpi=noirq"))
			acpi_noirq_set();
		else if (fullarg(from, "acpi_sci=edge"))
			acpi_sci_flags.trigger = 1;
		else if (fullarg(from, "acpi_sci=level"))
			acpi_sci_flags.trigger = 3;
		else if (fullarg(from, "acpi_sci=high"))
			acpi_sci_flags.polarity = 1;
		else if (fullarg(from, "acpi_sci=low"))
			acpi_sci_flags.polarity = 3;
		/* acpi=strict disables out-of-spec workarounds */
		else if (fullarg(from, "acpi=strict"))
			acpi_strict = 1;
#ifdef CONFIG_X86_IO_APIC
		else if (fullarg(from, "acpi_skip_timer_override"))
			acpi_skip_timer_override = 1;
#endif
#endif

#ifndef CONFIG_XEN
		if (fullarg(from, "disable_timer_pin_1"))
			disable_timer_pin_1 = 1;
		if (fullarg(from, "enable_timer_pin_1"))
			disable_timer_pin_1 = -1;

		if (fullarg(from, "nolapic") || fullarg(from, "disableapic")) {
			clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
			disable_apic = 1;
		}

		if (fullarg(from, "noapic"))
			skip_ioapic_setup = 1;

		if (fullarg(from, "apic")) {
			skip_ioapic_setup = 0;
			ioapic_force = 1;
		}
#endif

		if (!memcmp(from, "mem=", 4))
			parse_memopt(from + 4, &from);

		if (!memcmp(from, "memmap=", 7)) {
			/* the exactmap option is for a user-defined memory map */
			if (!memcmp(from + 7, "exactmap", 8)) {
#ifdef CONFIG_CRASH_DUMP
				/*
				 * If we are doing a crash dump, we still
				 * need to know the real memory size before
				 * the original memory map is reset.
				 */
				saved_max_pfn = e820_end_of_ram();
#endif
				from += 8 + 7;
				end_pfn_map = 0;
				e820.nr_map = 0;
				userdef = 1;
			} else {
				parse_memmapopt(from + 7, &from);
				userdef = 1;
			}
		}

#ifdef CONFIG_NUMA
		if (!memcmp(from, "numa=", 5))
			numa_setup(from + 5);
#endif

		if (!memcmp(from, "iommu=", 6))
			iommu_setup(from + 6);

		if (fullarg(from, "oops=panic"))
			panic_on_oops = 1;

		if (!memcmp(from, "noexec=", 7))
			nonx_setup(from + 7);

#ifdef CONFIG_KEXEC
		/*
		 * crashkernel=size@addr specifies the location to reserve
		 * for a crash kernel. By reserving this memory we guarantee
		 * that Linux never sets it up as a DMA target. Useful for
		 * holding code to do something appropriate after a kernel
		 * panic.
		 */
		if (!memcmp(from, "crashkernel=", 12)) {
			unsigned long size, base;

			size = memparse(from + 12, &from);
			if (*from == '@') {
				base = memparse(from + 1, &from);
				/*
				 * FIXME: Do I want a sanity check to
				 * validate the memory range?
				 */
				crashk_res.start = base;
				crashk_res.end = base + size - 1;
			}
		}
#endif

#ifdef CONFIG_PROC_VMCORE
		/*
		 * elfcorehdr= specifies the location of the ELF core header
		 * stored by the crashed kernel. This option will be passed
		 * by the kexec loader to the capture kernel.
		 */
		if (!memcmp(from, "elfcorehdr=", 11))
			elfcorehdr_addr = memparse(from + 11, &from);
#endif

#if defined(CONFIG_HOTPLUG_CPU) && !defined(CONFIG_XEN)
		if (!memcmp(from, "additional_cpus=", 16))
			setup_additional_cpus(from + 16);
#endif

next_char:
		c = *(from++);
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*(to++) = c;
	}

	if (userdef) {
		printk(KERN_INFO "user-defined physical RAM map:\n");
		e820_print_map("user");
	}

	*to = '\0';
	*cmdline_p = command_line;
}
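/*
 * parse_cmdline_early() leans on a fullarg() helper that is not shown
 * in this excerpt. A minimal sketch, assuming the usual definition from
 * arch/x86_64/kernel/setup.c of this era: match the whole option, so
 * "acpi=off" matches but "acpi=offx" does not.
 */
static int fullarg(char *p, char *arg)
{
	int l = strlen(arg);

	/* option must be followed by end-of-string or a space */
	return !memcmp(p, arg, l) && (p[l] == 0 || p[l] == ' ');
}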