char * __init machine_specific_memory_setup(void)
{
	char *who;

	who = "BIOS-e820";

	/*
	 * Try to copy the BIOS-supplied E820-map.
	 *
	 * Otherwise fake a memory map; one section from 0k->640k,
	 * the next section from 1mb->appropriate_mem_k
	 */
	sanitize_e820_map(boot_params.e820_map, &boot_params.e820_entries);
	if (copy_e820_map(boot_params.e820_map, boot_params.e820_entries) < 0) {
		unsigned long mem_size;

		/* compare results from other methods and take the greater */
		if (boot_params.alt_mem_k < boot_params.screen_info.ext_mem_k) {
			mem_size = boot_params.screen_info.ext_mem_k;
			who = "BIOS-88";
		} else {
			mem_size = boot_params.alt_mem_k;
			who = "BIOS-e801";
		}

		e820.nr_map = 0;
		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
	}
	return who;
}
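/*
 * Illustrative sketch, not taken from any kernel tree: when copy_e820_map()
 * fails, the fallback above hand-builds a two-entry map from the legacy
 * BIOS counters.  mem_size is reported in KiB, hence the "<< 10" to convert
 * it to bytes.  Assuming the conventional LOWMEMSIZE() of 640 KiB and
 * HIGH_MEMORY of 1 MiB, a machine reporting 64 MiB of extended memory
 * would end up with roughly:
 *
 *	[0x00000000, 0x0009ffff]  E820_RAM	(conventional memory)
 *	[0x00100000, 0x040fffff]  E820_RAM	(65536 KiB << 10, above 1 MiB)
 */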
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn = xen_start_info->nr_pages;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);

	e820.nr_map = 0;

	e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);

	/*
	 * Even though this is normal, usable memory under Xen, reserve
	 * ISA memory anyway because too many things think they can poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
				   __pa(xen_start_info->pt_base),
				   "XEN START INFO");

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	xen_return_unused_memory(xen_start_info->nr_pages, &e820);

	return "Xen";
}
static void __init update_e820_saved(void)
{
	u32 nr_map;

	nr_map = e820_saved.nr_map;
	if (sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
		return;
	e820_saved.nr_map = nr_map;
}
void __init update_e820(void)
{
	u32 nr_map;

	nr_map = e820.nr_map;
	if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
		return;
	e820.nr_map = nr_map;
	printk(KERN_INFO "modified physical RAM map:\n");
	e820_print_map("modified");
}
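/*
 * A minimal usage sketch (hypothetical caller, not part of the original
 * source): both update helpers above sanitize into a local count and only
 * commit it when sanitize_e820_map() returns 0, so a failed sanitization
 * leaves the table untouched.  A typical sequence after editing the map
 * would be:
 *
 *	e820_add_region(start, size, E820_RESERVED);
 *	update_e820();		// re-sort/merge, then print the modified map
 */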
static void memsize_820()
{
	int i, n, nr;
	struct e820entry nm[E820MAX];
	unsigned long long start;
	unsigned long long end;

	/* Clean up, adjust and copy the BIOS-supplied E820-map. */
	nr = sanitize_e820_map(e820, nm, e820_nr);

	/* If there is not a good 820 map use the BIOS 801/88 info */
	if (nr < 1 || nr > E820MAX) {
		memsize_801();
		return;
	}

	/* Build the memory map for testing */
	n = 0;
	for (i = 0; i < nr; i++) {
		if (nm[i].type == E820_RAM || nm[i].type == E820_ACPI) {
			start = nm[i].addr;
			end = start + nm[i].size;

			/* Don't ever use memory between 640 and 1024k */
			if (start > RES_START && start < RES_END) {
				if (end < RES_END)
					continue;
				start = RES_END;
			}
			if (end > RES_START && end < RES_END)
				end = RES_START;

			v->pmap[n].start = (start + 4095) >> 12;
			v->pmap[n].end = end >> 12;
			v->test_pages += v->pmap[n].end - v->pmap[n].start;
			n++;
#if 0
			int epmap = 0;
			int lpmap = 0;
			if (n > 12) {
				epmap = 34;
				lpmap = -12;
			}
			hprint(11+n+lpmap, 0+epmap, v->pmap[n-1].start);
			hprint(11+n+lpmap, 10+epmap, v->pmap[n-1].end);
			hprint(11+n+lpmap, 20+epmap, v->pmap[n-1].end - v->pmap[n-1].start);
			dprint(11+n+lpmap, 30+epmap, nm[i].type, 0, 0);
#endif
		}
	}
}
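/*
 * Worked example of the page arithmetic above (illustrative numbers, not
 * from the original source): byte addresses become 4 KiB page frame
 * numbers, with the start rounded up and the end rounded down so only
 * whole pages are tested.  For a RAM entry spanning [0x00100c00, 0x009fff00):
 *
 *	pmap.start = (0x100c00 + 4095) >> 12 = 0x101	(rounded up)
 *	pmap.end   =  0x9fff00 >> 12         = 0x9ff	(rounded down)
 *	test_pages += 0x9ff - 0x101 = 0x8fe pages
 */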
static __init void xen_add_extra_mem(unsigned long pages)
{
	u64 size = (u64)pages * PAGE_SIZE;
	u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;

	if (!pages)
		return;

	e820_add_region(extra_start, size, E820_RAM);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	memblock_x86_reserve_range(extra_start, extra_start + size, "XEN EXTRA");

	xen_extra_mem_size += size;

	xen_max_p2m_pfn = PFN_DOWN(extra_start + size);
}
/**
 * Because of the size limitation of struct boot_params, only the first
 * 128 E820 memory entries are passed to the kernel via
 * boot_params.e820_map; the others are passed via the SETUP_E820_EXT node
 * of the linked list of struct setup_data, which is parsed here.
 */
void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
{
	u32 map_len;
	int entries;
	struct e820entry *extmap;

	entries = sdata->len / sizeof(struct e820entry);
	map_len = sdata->len + sizeof(struct setup_data);
	if (map_len > PAGE_SIZE)
		sdata = early_ioremap(pa_data, map_len);
	extmap = (struct e820entry *)(sdata->data);
	__append_e820_map(extmap, entries);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	if (map_len > PAGE_SIZE)
		early_iounmap(sdata, map_len);
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("extended");
}
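/*
 * Sketch of the caller side (approximate, assuming the boot protocol
 * described in the comment above; details differ between kernel versions):
 * the boot code walks the setup_data singly-linked list starting at
 * boot_params.hdr.setup_data and hands SETUP_E820_EXT nodes to
 * parse_e820_ext().  The name parse_setup_data_sketch is hypothetical.
 */
static void __init parse_setup_data_sketch(void)
{
	u64 pa_data = boot_params.hdr.setup_data;

	while (pa_data) {
		/* Map one page; parse_e820_ext() remaps if the payload is larger. */
		struct setup_data *data = early_ioremap(pa_data, PAGE_SIZE);
		u64 pa_next = data->next;

		if (data->type == SETUP_E820_EXT)
			parse_e820_ext(data, pa_data);

		early_iounmap(data, PAGE_SIZE);
		pa_data = pa_next;
	}
}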
/*
 * Sanitize the e820 table from the BIOS, and then copy it to a safe place:
 * e820_bios.  If we have an empty e820 table, we do not fake one, just panic.
 */
void __init setup_memory_map(void)
{
	u32 new_nr;

	new_nr = boot_params.e820_nr_entries;
	sanitize_e820_map(boot_params.e820_map, ARRAY_SIZE(boot_params.e820_map),
			  &new_nr);
	boot_params.e820_nr_entries = new_nr;
	if (append_e820_map(boot_params.e820_map, boot_params.e820_nr_entries) < 0)
		panic("e820: BIOS provides us *nothing*");

	memcpy(&e820_bios, &e820, sizeof(struct e820map));

	printk(KERN_INFO "e820: BIOS-provided physical RAM map:\n");
	e820_print_map("BIOS-e820");
}
static void __init xen_add_extra_mem(unsigned long pages)
{
	unsigned long pfn;

	u64 size = (u64)pages * PAGE_SIZE;
	u64 extra_start = xen_extra_mem_start + xen_extra_mem_size;

	if (!pages)
		return;

	e820_add_region(extra_start, size, E820_RAM);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	memblock_reserve(extra_start, size);

	xen_extra_mem_size += size;

	xen_max_p2m_pfn = PFN_DOWN(extra_start + size);

	for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++)
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;
	static struct e820entry map_raw[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long extra_pages = 0;
	unsigned long extra_limit;
	unsigned long identity_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	memcpy(map_raw, map, sizeof(map));
	e820.nr_map = 0;
	xen_extra_mem_start = mem_end;
	for (i = 0; i < memmap.nr_entries; i++) {
		unsigned long long end;

		/* Guard against non-page aligned E820 entries. */
		if (map[i].type == E820_RAM)
			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;

		end = map[i].addr + map[i].size;
		if (map[i].type == E820_RAM && end > mem_end) {
			/* RAM off the end - may be partially included */
			u64 delta = min(map[i].size, end - mem_end);

			map[i].size -= delta;
			end -= delta;

			extra_pages += PFN_DOWN(delta);
			/*
			 * Set RAM below 4GB that is not for us to be unusable.
			 * This prevents "System RAM" address space from being
			 * used as potential resource for I/O address (happens
			 * when 'allocate_resource' is called).
			 */
			if (delta &&
				(xen_initial_domain() && end < 0x100000000ULL))
				e820_add_region(end, delta, E820_UNUSABLE);
		}

		if (map[i].size > 0 && end > xen_extra_mem_start)
			xen_extra_mem_start = end;

		/* Add region if any remains */
		if (map[i].size > 0)
			e820_add_region(map[i].addr, map[i].size, map[i].type);
	}
	/* Align the balloon area so that max_low_pfn does not get set
	 * to be at the _end_ of the PCI gap at the far end (fee01000).
	 * Note that xen_extra_mem_start gets set in the loop above to be
	 * past the last E820 region. */
	if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
		xen_extra_mem_start = (1ULL<<32);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 *
	 * In Dom0, the host E820 information can leave gaps in the
	 * ISA range, which would cause us to release those pages.  To
	 * avoid this, we unconditionally reserve them here.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	extra_limit = xen_get_max_pages();
	if (max_pfn + extra_pages > extra_limit) {
		if (extra_limit > max_pfn)
			extra_pages = extra_limit - max_pfn;
		else
			extra_pages = 0;
	}

	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  max_pfn + extra_pages);

	if (extra_limit >= max_pfn)
		extra_pages = extra_limit - max_pfn;
	else
		extra_pages = 0;

	xen_add_extra_mem(extra_pages);

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs.  We supply it with the non-sanitized version
	 * of the E820.
	 */
	identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
	return "Xen";
}
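/*
 * Worked example of the EXTRA_MEM_RATIO clamp above (illustrative numbers;
 * assumes EXTRA_MEM_RATIO is 10, as it is usually defined near this code,
 * and that PFN_DOWN(MAXMEM) exceeds max_pfn).  For a domain started with
 * 512 MiB (max_pfn = 0x20000) and 8 GiB worth of potential balloon space
 * (extra_pages = 0x200000):
 *
 *	extra_limit = min(10 * 0x20000, 0x20000 + 0x200000)
 *		    = min(0x140000, 0x220000) = 0x140000
 *	extra_pages = 0x140000 - 0x20000 = 0x120000	(~4.5 GiB)
 *
 * i.e. initial memory plus balloon area is capped at ten times the
 * initial allocation.
 */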
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long last_pfn = 0;
	unsigned long extra_pages = 0;
	unsigned long populated;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable(map, memmap.nr_entries);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs.  Any RAM pages that would be made inaccessible by
	 * this are first released.
	 */
	xen_released_pages = xen_set_identity_and_release(
		map, memmap.nr_entries, max_pfn);

	/*
	 * Populate back the non-RAM pages and E820 gaps that had been
	 * released.
	 */
	populated = xen_populate_chunk(map, memmap.nr_entries,
			max_pfn, &last_pfn, xen_released_pages);

	xen_released_pages -= populated;
	extra_pages += xen_released_pages;

	if (last_pfn > max_pfn) {
		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
		mem_end = PFN_PHYS(max_pfn);
	}
	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);

	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 * We tried to make the memblock_reserve more selective so
	 * that it would be clear what region is reserved.  Sadly we ran
	 * into the problem wherein on a 64-bit hypervisor with a 32-bit
	 * initial domain, the pt_base has the cr3 value which is not
	 * necessarily where the pagetable starts!  As Jan put it: "
	 * Actually, the adjustment turns out to be correct: The page
	 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
	 * "first L2", "first L3", so the offset to the page table base is
	 * indeed 2.  When reading xen/include/public/xen.h's comment
	 * very strictly, this is not a violation (since there nothing is said
	 * that the first thing in the page table space is pointed to by
	 * pt_base; I admit that this seems to be implied though, namely
	 * do I think that it is implied that the page table space is the
	 * range [pt_base, pt_base + nr_pt_frames), whereas that
	 * range here indeed is [pt_base - 2, pt_base - 2 + nr_pt_frames),
	 * which - without a priori knowledge - the kernel would have
	 * difficulty to figure out)." - so let's just fall back to the
	 * easy way and reserve the whole region.
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long last_pfn = 0;
	unsigned long extra_pages = 0;
	unsigned long populated;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs.  Any RAM pages that would be made inaccessible by
	 * this are first released.
	 */
	xen_released_pages = xen_set_identity_and_release(
		map, memmap.nr_entries, max_pfn);

	/*
	 * Populate back the non-RAM pages and E820 gaps that had been
	 * released.
	 */
	populated = xen_populate_chunk(map, memmap.nr_entries,
			max_pfn, &last_pfn, xen_released_pages);

	xen_released_pages -= populated;
	extra_pages += xen_released_pages;

	if (last_pfn > max_pfn) {
		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
		mem_end = PFN_PHYS(max_pfn);
	}
	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);

	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	xen_released_pages = xen_set_identity_and_release(
		map, memmap.nr_entries, max_pfn);
	extra_pages += xen_released_pages;

	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);

	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn, pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	xen_parse_512gb();
	max_pfn = xen_get_pages_limit();
	max_pfn = min(max_pfn, xen_start_info->nr_pages);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_map[0].addr = 0ULL;
		xen_e820_map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_map[0].size += 8ULL << 20;
		xen_e820_map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_map_entries = memmap.nr_entries;

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
			  &xen_e820_map_entries);

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_count_remap_pages(max_pfn);

	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			   extra_pages, max_pages - max_pfn);
	i = 0;
	addr = xen_e820_map[0].addr;
	size = xen_e820_map[0].size;
	while (i < xen_e820_map_entries) {
		bool discard = false;

		chunk_size = size;
		type = xen_e820_map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				pfn_s = PFN_UP(addr);
				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
				extra_pages -= n_pfns;
				xen_add_extra_mem(pfn_s, n_pfns);
				xen_max_p2m_pfn = pfn_s + n_pfns;
			} else
				discard = true;
		}

		if (!discard)
			xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_map_entries) {
				addr = xen_e820_map[i].addr;
				size = xen_e820_map[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
				 __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}
char * __init machine_specific_memory_setup(void)
{
	char *who;

	who = "NOT VOYAGER";

	if (voyager_level == 5) {
		__u32 addr, length;
		int i;

		who = "Voyager-SUS";

		e820.nr_map = 0;
		for (i = 0; voyager_memory_detect(i, &addr, &length); i++) {
			add_memory_region(addr, length, E820_RAM);
		}
		return who;
	} else if (voyager_level == 4) {
		__u32 tom;
		__u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT) << 8;
		/* select the DINO config space */
		outb(VOYAGER_DINO, VOYAGER_CAT_CONFIG_PORT);
		/* Read DINO top of memory register */
		tom = ((inb(catbase + 0x4) & 0xf0) << 16)
			+ ((inb(catbase + 0x5) & 0x7f) << 24);

		if (inb(catbase) != VOYAGER_DINO) {
			printk(KERN_ERR "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n");
			tom = (EXT_MEM_K) << 10;
		}
		who = "Voyager-TOM";
		add_memory_region(0, 0x9f000, E820_RAM);
		/* map from 1M to top of memory */
		add_memory_region(1*1024*1024, tom - 1*1024*1024, E820_RAM);
		/* FIXME: Should check the ASICs to see if I need to
		 * take out the 8M window.  Just do it at the moment
		 * */
		add_memory_region(8*1024*1024, 8*1024*1024, E820_RESERVED);
		return who;
	}

	who = "BIOS-e820";

	/*
	 * Try to copy the BIOS-supplied E820-map.
	 *
	 * Otherwise fake a memory map; one section from 0k->640k,
	 * the next section from 1mb->appropriate_mem_k
	 */
	sanitize_e820_map(E820_MAP, &E820_MAP_NR);
	if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
		unsigned long mem_size;

		/* compare results from other methods and take the greater */
		if (ALT_MEM_K < EXT_MEM_K) {
			mem_size = EXT_MEM_K;
			who = "BIOS-88";
		} else {
			mem_size = ALT_MEM_K;
			who = "BIOS-e801";
		}

		e820.nr_map = 0;
		add_memory_region(0, LOWMEMSIZE(), E820_RAM);
		add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
	}
	return who;
}
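/*
 * Decoding sketch for the DINO top-of-memory read above; the register
 * layout is inferred purely from the masks and shifts in the code:
 *
 *	tom = ((inb(catbase + 0x4) & 0xf0) << 16)	// bits 20..23
 *	    + ((inb(catbase + 0x5) & 0x7f) << 24);	// bits 24..30
 *
 * e.g. reading 0x40 from offset 0x4 and 0x01 from offset 0x5 gives
 * tom = 0x00400000 + 0x01000000 = 0x01400000, i.e. 20 MiB of RAM.
 */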
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long extra_pages = 0;
	unsigned long extra_limit;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	e820.nr_map = 0;
	xen_extra_mem_start = mem_end;
	for (i = 0; i < memmap.nr_entries; i++) {
		unsigned long long end = map[i].addr + map[i].size;

		if (map[i].type == E820_RAM) {
			if (map[i].addr < mem_end && end > mem_end) {
				/* Truncate region to max_mem. */
				u64 delta = end - mem_end;

				map[i].size -= delta;
				extra_pages += PFN_DOWN(delta);

				end = mem_end;
			}
		}

		if (end > xen_extra_mem_start)
			xen_extra_mem_start = end;

		/* If region is non-RAM or below mem_end, add what remains */
		if ((map[i].type != E820_RAM || map[i].addr < mem_end) &&
		    map[i].size > 0)
			e820_add_region(map[i].addr, map[i].size, map[i].type);
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 *
	 * In Dom0, the host E820 information can leave gaps in the
	 * ISA range, which would cause us to release those pages.  To
	 * avoid this, we unconditionally reserve them here.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
				   __pa(xen_start_info->pt_base),
				   "XEN START INFO");

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  max_pfn + extra_pages);

	if (extra_limit >= max_pfn)
		extra_pages = extra_limit - max_pfn;
	else
		extra_pages = 0;

	if (!xen_initial_domain())
		xen_add_extra_mem(extra_pages);

	return "Xen";
}
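/*
 * Worked example of the truncation in the loop above (illustrative numbers,
 * not from the original source): with mem_end = 1 GiB and a host E820 RAM
 * entry [0, 4 GiB), delta = 3 GiB, so the entry is cut back to [0, 1 GiB)
 * and PFN_DOWN(3 GiB) = 0xc0000 pages are added to extra_pages for later
 * use as balloon space.  RAM entries that start entirely above mem_end are
 * skipped by the final condition and only advance xen_extra_mem_start.
 */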