char * __init xen_memory_setup(void)
{
	unsigned long max_pfn = xen_start_info->nr_pages;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);

	e820.nr_map = 0;

	e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);

	/*
	 * Even though this is normal, usable memory under Xen, reserve
	 * ISA memory anyway because too many things think they can poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
				   __pa(xen_start_info->pt_base),
				   "XEN START INFO");

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	xen_return_unused_memory(xen_start_info->nr_pages, &e820);

	return "Xen";
}
static char * __init visws_memory_setup(void)
{
	long long gfx_mem_size = 8 * MB;

	mem_size = boot_params.alt_mem_k;

	if (!mem_size) {
		printk(KERN_WARNING "Bootloader didn't set memory size, upgrade it !\n");
		mem_size = 128 * MB;
	}

	/*
	 * this hardcodes the graphics memory to 8 MB
	 * it really should be sized dynamically (or at least
	 * set as a boot param)
	 */
	if (!sgivwfb_mem_size) {
		printk(KERN_WARNING "Defaulting to 8 MB framebuffer size\n");
		sgivwfb_mem_size = 8 * MB;
	}

	/*
	 * Trim to nearest MB
	 */
	sgivwfb_mem_size &= ~((1 << 20) - 1);
	sgivwfb_mem_phys = mem_size - gfx_mem_size;

	e820_add_region(0, LOWMEMSIZE(), E820_RAM);
	e820_add_region(HIGH_MEMORY,
			mem_size - sgivwfb_mem_size - HIGH_MEMORY,
			E820_RAM);
	e820_add_region(sgivwfb_mem_phys, sgivwfb_mem_size, E820_RESERVED);

	return "PROM";
}
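The "trim to nearest MB" step above rounds a byte count down to a 1 MB boundary by masking off the low 20 bits. Below is a minimal standalone sketch of that masking, not kernel code; the sizes are illustrative values chosen for the example.

/*
 * Standalone sketch only: clearing the low 20 bits rounds a byte count
 * down to a 1 MB boundary, exactly as the framebuffer trim above does.
 */
#include <stdio.h>

int main(void)
{
	unsigned long size = 0x00850000;                /* hypothetical, ~8.3 MB */
	unsigned long trimmed = size & ~((1UL << 20) - 1);

	printf("0x%08lx -> 0x%08lx\n", size, trimmed);  /* 0x00850000 -> 0x00800000 */
	return 0;
}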
static char * __init x86_intel_ce_memory_setup(void)
{
	unsigned long memory_top, linux_top;

	/*
	 * Our only memory config option is e801; the firmware doesn't
	 * seem to bother with an e820 map.
	 */
	memory_top = ONE_MB + boot_params.alt_mem_k * 1024;

	/* Ancient bootloaders pass garbage here, do a basic sanity check */
	if (memory_top < 4 * ONE_MB) {
		printk(KERN_WARNING "Incorrect e801 memory size 0x%08lx, using 768 MB\n",
		       memory_top);
		memory_top = 768 * ONE_MB;
	}

	e820_add_region(0, LOWMEMSIZE(), E820_RAM);
	e820_add_region(HIGH_MEMORY, memory_top - HIGH_MEMORY, E820_RAM);

	/*
	 * CEFDK uses some regions in the first 1MB for various purposes.
	 * We hardcode a superset of those regions to support different
	 * CEFDK releases.
	 */
#ifdef CONFIG_ACPI
	e820_update_range(0x10000, 0x8000, E820_RAM, E820_ACPI);
#endif
	/* 8051 microcode can be loaded at 0x30000 or 0x40000 */
	e820_update_range(0x30000, 0x20000, E820_RAM, E820_RESERVED);
	/* e1000 "NVRAM" */
	e820_update_range(0x60000, 0x1000, E820_RAM, E820_RESERVED);

	linux_top = x86_intel_ce_machine_top_of_ram(memory_top);
	if (linux_top < memory_top)
		e820_update_range(linux_top, memory_top - linux_top,
				  E820_RAM, E820_RESERVED);

	/* Check the board name for the reboot and halt sequence. */
	{
		static __initdata char tmp_cmdline[COMMAND_LINE_SIZE];

		strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		parse_args("beyond_param", tmp_cmdline, NULL, 0,
			   parse_pwrctl_type);
	}

	return "BIOS-e801";
}
char *__init machine_specific_memory_setup(void)
{
	char *who;
	int new_nr;

	who = "NOT VOYAGER";

	if (voyager_level == 5) {
		__u32 addr, length;
		int i;

		who = "Voyager-SUS";

		e820.nr_map = 0;
		for (i = 0; voyager_memory_detect(i, &addr, &length); i++) {
			e820_add_region(addr, length, E820_RAM);
		}
		return who;
	} else if (voyager_level == 4) {
		__u32 tom;
		__u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT) << 8;

		/* select the DINO config space */
		outb(VOYAGER_DINO, VOYAGER_CAT_CONFIG_PORT);
		/* Read DINO top of memory register */
		tom = ((inb(catbase + 0x4) & 0xf0) << 16) +
		      ((inb(catbase + 0x5) & 0x7f) << 24);

		if (inb(catbase) != VOYAGER_DINO) {
			printk(KERN_ERR "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n");
			tom = (boot_params.screen_info.ext_mem_k) << 10;
		}
		who = "Voyager-TOM";
		e820_add_region(0, 0x9f000, E820_RAM);
		/* map from 1M to top of memory */
		e820_add_region(1 * 1024 * 1024, tom - 1 * 1024 * 1024,
				E820_RAM);
		/*
		 * FIXME: Should check the ASICs to see if I need to
		 * take out the 8M window.  Just do it at the moment.
		 */
		e820_add_region(8 * 1024 * 1024, 8 * 1024 * 1024,
				E820_RESERVED);
		return who;
	}

	return default_machine_specific_memory_setup();
}
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
	u64 end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((u64)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}
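A standalone sketch of what the alignment above does, assuming 4 KiB pages (the addresses and the local PAGE_ALIGN definition are assumptions for illustration, not kernel code): PAGE_ALIGN rounds the start up and the mask rounds the end down, so an unaligned RAM region shrinks rather than grows.

/* Sketch only: demonstrates the round-up/round-down behaviour of the helper. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE     4096ULL                       /* assumed 4 KiB pages */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t start = 0x100800;                  /* hypothetical, not page aligned */
	uint64_t end   = 0x220800;

	uint64_t astart = PAGE_ALIGN(start);        /* rounds up   -> 0x101000 */
	uint64_t aend   = end & ~(PAGE_SIZE - 1);   /* rounds down -> 0x220000 */

	printf("[%#llx, %#llx) -> [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end,
	       (unsigned long long)astart, (unsigned long long)aend);
	return 0;
}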
/* Setting up memory is fairly easy. */
static __init char *lguest_memory_setup(void)
{
	/*
	 * The Linux bootloader header contains an "e820" memory map: the
	 * Launcher populated the first entry with our memory limit.
	 */
	e820_add_region(boot_params.e820_map[0].addr,
			boot_params.e820_map[0].size,
			boot_params.e820_map[0].type);

	/* This string is for the boot messages. */
	return "LGUEST";
}
static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}
/* Setting up memory is fairly easy. */
static __init char *lguest_memory_setup(void)
{
	/*
	 * We do this here and not earlier because lockcheck used to barf if we
	 * did it before start_kernel().  I think we fixed that, so it'd be
	 * nice to move it back to lguest_init.  Patch welcome...
	 */
	atomic_notifier_chain_register(&panic_notifier_list, &paniced);

	/*
	 * The Linux bootloader header contains an "e820" memory map: the
	 * Launcher populated the first entry with our memory limit.
	 */
	e820_add_region(boot_params.e820_map[0].addr,
			boot_params.e820_map[0].size,
			boot_params.e820_map[0].type);

	/* This string is for the boot messages. */
	return "LGUEST";
}
static __init void xen_add_extra_mem(unsigned long pages)
{
	u64 size = (u64)pages * PAGE_SIZE;
	u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;

	if (!pages)
		return;

	e820_add_region(extra_start, size, E820_RAM);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	memblock_x86_reserve_range(extra_start, extra_start + size,
				   "XEN EXTRA");

	xen_extra_mem_size += size;

	xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
}
static int __init __append_e820_map(struct e820entry *biosmap, int nr_map)
{
	while (nr_map) {
		u64 start = biosmap->addr;
		u64 size = biosmap->size;
		u64 end = start + size;
		u32 type = biosmap->type;

		if (start > end)
			return -1;

		e820_add_region(start, size, type);

		biosmap++;
		nr_map--;
	}
	return 0;
}
static int __init __append_e820_map(struct e820entry *biosmap, int nr_map)
{
	while (nr_map) {
		u64 start = biosmap->addr;
		u64 size = biosmap->size;
		u64 end = start + size;
		u32 type = biosmap->type;

		/* Overflow in 64 bits? Ignore the memory map. */
		if (start > end)
			return -1;

		e820_add_region(start, size, type);

		biosmap++;
		nr_map--;
	}
	return 0;
}
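For context, here is a hedged sketch of how a caller might wrap __append_e820_map. The wrapper name append_e820_map and its "fewer than two entries" policy are assumptions for illustration (they mirror the usual convention of ignoring degenerate BIOS maps) and are not taken from this listing.

/*
 * Hedged sketch only: a plausible wrapper around __append_e820_map().
 * Name and pre-check policy are assumptions, not part of the listing above.
 */
static int __init append_e820_map(struct e820entry *biosmap, int nr_map)
{
	/* Only one memory region (or negative)?  Ignore it. */
	if (nr_map < 2)
		return -1;

	return __append_e820_map(biosmap, nr_map);
}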
static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
					u64 size, unsigned old_type,
					unsigned new_type)
{
	int i;
	u64 real_updated_size = 0;

	BUG_ON(old_type == new_type);

	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	for (i = 0; i < e820x->nr_map; i++) {
		struct e820entry *ei = &e820x->map[i];
		u64 final_start, final_end;

		if (ei->type != old_type)
			continue;

		/* totally covered? */
		if (ei->addr >= start &&
		    (ei->addr + ei->size) <= (start + size)) {
			ei->type = new_type;
			real_updated_size += ei->size;
			continue;
		}

		/* partially covered */
		final_start = max(start, ei->addr);
		final_end = min(start + size, ei->addr + ei->size);
		if (final_start >= final_end)
			continue;

		e820_add_region(final_start, final_end - final_start,
				new_type);
		real_updated_size += final_end - final_start;

		ei->size -= final_end - final_start;
		if (ei->addr < final_start)
			continue;
		ei->addr = final_end;
	}
	return real_updated_size;
}
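A hedged usage sketch: helpers like this are typically driven through a thin wrapper that supplies the global map. The wrapper name below and the example addresses are assumptions for illustration, not part of this listing.

/*
 * Hedged sketch only: a thin wrapper applying the update to the global e820
 * map, followed by an example call that turns a 4 KiB RAM range at 0x60000
 * into a reserved range (addresses are illustrative).
 */
u64 __init e820_update_range(u64 start, u64 size,
			     unsigned old_type, unsigned new_type)
{
	return e820_update_range_map(&e820, start, size, old_type, new_type);
}

/* Example: e820_update_range(0x60000, 0x1000, E820_RAM, E820_RESERVED); */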
static void __init xen_add_extra_mem(unsigned long pages)
{
	unsigned long pfn;

	u64 size = (u64)pages * PAGE_SIZE;
	u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;

	if (!pages)
		return;

	e820_add_region(extra_start, size, E820_RAM);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	memblock_reserve(extra_start, size);

	xen_extra_mem_size += size;

	xen_max_p2m_pfn = PFN_DOWN(extra_start + size);

	for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long extra_pages = 0;
	unsigned long extra_limit;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	e820.nr_map = 0;
	xen_extra_mem_start = mem_end;
	for (i = 0; i < memmap.nr_entries; i++) {
		unsigned long long end = map[i].addr + map[i].size;

		if (map[i].type == E820_RAM) {
			if (map[i].addr < mem_end && end > mem_end) {
				/* Truncate region to max_mem. */
				u64 delta = end - mem_end;

				map[i].size -= delta;
				extra_pages += PFN_DOWN(delta);

				end = mem_end;
			}
		}

		if (end > xen_extra_mem_start)
			xen_extra_mem_start = end;

		/* If region is non-RAM or below mem_end, add what remains */
		if ((map[i].type != E820_RAM || map[i].addr < mem_end) &&
		    map[i].size > 0)
			e820_add_region(map[i].addr, map[i].size, map[i].type);
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 *
	 * In Dom0, the host E820 information can leave gaps in the
	 * ISA range, which would cause us to release those pages.  To
	 * avoid this, we unconditionally reserve them here.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
				   __pa(xen_start_info->pt_base),
				   "XEN START INFO");

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  max_pfn + extra_pages);

	if (extra_limit >= max_pfn)
		extra_pages = extra_limit - max_pfn;
	else
		extra_pages = 0;

	if (!xen_initial_domain())
		xen_add_extra_mem(extra_pages);

	return "Xen";
}
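A hedged, standalone sketch of the extra-memory clamp used above, with illustrative numbers: the EXTRA_MEM_RATIO value, the lowmem limit, and the page counts below are assumptions chosen for the example, not values from this listing.

/* Sketch only: shows how the clamp limits extra_pages relative to max_pfn. */
#include <stdio.h>

#define EXTRA_MEM_RATIO   10UL       /* assumed ratio, for illustration only */
#define PFN_DOWN_MAXMEM   0x37c00UL  /* hypothetical lowmem limit in pages   */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long max_pfn = 0x20000;      /* 512 MiB of initial memory     */
	unsigned long extra_pages = 0x200000; /* hypothetical reclaimed pages  */

	unsigned long extra_limit =
		min_ul(EXTRA_MEM_RATIO * min_ul(max_pfn, PFN_DOWN_MAXMEM),
		       max_pfn + extra_pages);

	if (extra_limit >= max_pfn)
		extra_pages = extra_limit - max_pfn;
	else
		extra_pages = 0;

	printf("extra_pages clamped to %#lx\n", extra_pages); /* 0x120000 here */
	return 0;
}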
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn, pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	xen_parse_512gb();
	max_pfn = xen_get_pages_limit();
	max_pfn = min(max_pfn, xen_start_info->nr_pages);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_map[0].addr = 0ULL;
		xen_e820_map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_map[0].size += 8ULL << 20;
		xen_e820_map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_map_entries = memmap.nr_entries;

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
			  &xen_e820_map_entries);

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_count_remap_pages(max_pfn);

	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			   extra_pages, max_pages - max_pfn);
	i = 0;
	addr = xen_e820_map[0].addr;
	size = xen_e820_map[0].size;
	while (i < xen_e820_map_entries) {
		bool discard = false;

		chunk_size = size;
		type = xen_e820_map[i].type;
		if (type == E820_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				pfn_s = PFN_UP(addr);
				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
				extra_pages -= n_pfns;
				xen_add_extra_mem(pfn_s, n_pfns);
				xen_max_p2m_pfn = pfn_s + n_pfns;
			} else
				discard = true;
		}

		if (!discard)
			xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_map_entries) {
				addr = xen_e820_map[i].addr;
				size = xen_e820_map[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
				 __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;
	static struct e820entry map_raw[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long extra_pages = 0;
	unsigned long extra_limit;
	unsigned long identity_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	memcpy(map_raw, map, sizeof(map));
	e820.nr_map = 0;
	xen_extra_mem_start = mem_end;
	for (i = 0; i < memmap.nr_entries; i++) {
		unsigned long long end;

		/* Guard against non-page aligned E820 entries. */
		if (map[i].type == E820_RAM)
			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;

		end = map[i].addr + map[i].size;
		if (map[i].type == E820_RAM && end > mem_end) {
			/* RAM off the end - may be partially included */
			u64 delta = min(map[i].size, end - mem_end);

			map[i].size -= delta;
			end -= delta;

			extra_pages += PFN_DOWN(delta);
			/*
			 * Set RAM below 4GB that is not for us to be unusable.
			 * This prevents "System RAM" address space from being
			 * used as potential resource for I/O address (happens
			 * when 'allocate_resource' is called).
			 */
			if (delta &&
			    (xen_initial_domain() && end < 0x100000000ULL))
				e820_add_region(end, delta, E820_UNUSABLE);
		}

		if (map[i].size > 0 && end > xen_extra_mem_start)
			xen_extra_mem_start = end;

		/* Add region if any remains */
		if (map[i].size > 0)
			e820_add_region(map[i].addr, map[i].size, map[i].type);
	}
	/*
	 * Align the balloon area so that max_low_pfn does not get set
	 * to be at the _end_ of the PCI gap at the far end (fee01000).
	 * Note that xen_extra_mem_start gets set in the loop above to be
	 * past the last E820 region.
	 */
	if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
		xen_extra_mem_start = (1ULL<<32);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 *
	 * In Dom0, the host E820 information can leave gaps in the
	 * ISA range, which would cause us to release those pages.  To
	 * avoid this, we unconditionally reserve them here.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	extra_limit = xen_get_max_pages();
	if (max_pfn + extra_pages > extra_limit) {
		if (extra_limit > max_pfn)
			extra_pages = extra_limit - max_pfn;
		else
			extra_pages = 0;
	}

	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  max_pfn + extra_pages);

	if (extra_limit >= max_pfn)
		extra_pages = extra_limit - max_pfn;
	else
		extra_pages = 0;

	xen_add_extra_mem(extra_pages);

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs.  We supply it with the non-sanitized version
	 * of the E820.
	 */
	identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);

	return "Xen";
}
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs.  Any RAM pages that would be made inaccessible by
	 * this are first released.
	 */
	xen_released_pages = xen_set_identity_and_release(
		map, memmap.nr_entries, max_pfn);
	extra_pages += xen_released_pages;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);

	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long last_pfn = 0;
	unsigned long extra_pages = 0;
	unsigned long populated;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable(map, memmap.nr_entries);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs.  Any RAM pages that would be made inaccessible by
	 * this are first released.
	 */
	xen_released_pages = xen_set_identity_and_release(
		map, memmap.nr_entries, max_pfn);

	/*
	 * Populate back the non-RAM pages and E820 gaps that had been
	 * released.
	 */
	populated = xen_populate_chunk(map, memmap.nr_entries,
			max_pfn, &last_pfn, xen_released_pages);

	xen_released_pages -= populated;
	extra_pages += xen_released_pages;

	if (last_pfn > max_pfn) {
		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
		mem_end = PFN_PHYS(max_pfn);
	}

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);

	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 * We tried to make the memblock_reserve more selective so
	 * that it would be clear what region is reserved.  Sadly we ran
	 * into the problem wherein on a 64-bit hypervisor with a 32-bit
	 * initial domain, the pt_base has the cr3 value which is not
	 * necessarily where the pagetable starts!  As Jan put it: "
	 * Actually, the adjustment turns out to be correct: The page
	 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
	 * "first L2", "first L3", so the offset to the page table base is
	 * indeed 2.  When reading xen/include/public/xen.h's comment
	 * very strictly, this is not a violation (since there nothing is said
	 * that the first thing in the page table space is pointed to by
	 * pt_base; I admit that this seems to be implied though, namely
	 * do I think that it is implied that the page table space is the
	 * range [pt_base, pt_base + nr_pt_frames), whereas that
	 * range here indeed is [pt_base - 2, pt_base - 2 + nr_pt_frames),
	 * which - without a priori knowledge - the kernel would have
	 * difficulty to figure out)." - so lets just fall back to the
	 * easy way and reserve the whole region.
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long last_pfn = 0;
	unsigned long extra_pages = 0;
	unsigned long populated;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs.  Any RAM pages that would be made inaccessible by
	 * this are first released.
	 */
	xen_released_pages = xen_set_identity_and_release(
		map, memmap.nr_entries, max_pfn);

	/*
	 * Populate back the non-RAM pages and E820 gaps that had been
	 * released.
	 */
	populated = xen_populate_chunk(map, memmap.nr_entries,
			max_pfn, &last_pfn, xen_released_pages);

	xen_released_pages -= populated;
	extra_pages += xen_released_pages;

	if (last_pfn > max_pfn) {
		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
		mem_end = PFN_PHYS(max_pfn);
	}

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);

	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}
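To make the chunk-walking loop above easier to follow, here is a hedged, standalone sketch in plain C with illustrative constants and no kernel APIs. It splits a single oversized RAM entry the same way the loop does: the part below mem_end stays usable RAM, up to extra_pages worth above it is treated as "extra" memory, and whatever is left is marked unusable.

/* Sketch only: region sizes and limits are made-up values for illustration. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL

enum { RAM, EXTRA, UNUSABLE };

int main(void)
{
	/*
	 * One RAM entry of 1 GiB starting at 0, with mem_end at 512 MiB and
	 * room for 64 MiB of extra memory (all values are illustrative).
	 */
	uint64_t addr = 0, size = 1ULL << 30;
	uint64_t mem_end = 512ULL << 20;
	uint64_t extra_pages = (64ULL << 20) / PAGE_SIZE;

	while (size) {
		uint64_t chunk = size;
		int type;

		if (addr < mem_end) {
			chunk = mem_end - addr < size ? mem_end - addr : size;
			type = RAM;
		} else if (extra_pages) {
			uint64_t lim = extra_pages * PAGE_SIZE;

			chunk = lim < size ? lim : size;
			extra_pages -= chunk / PAGE_SIZE;
			type = EXTRA;
		} else {
			type = UNUSABLE;
		}

		printf("[%#llx, %#llx) -> %s\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + chunk),
		       type == RAM ? "RAM" :
		       type == EXTRA ? "extra" : "unusable");

		addr += chunk;
		size -= chunk;
	}
	return 0;
}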