/*
 * Like memcpy, but with physical addresses for dest and src.
 *
 * Both ranges are accessed through temporary early fixmap mappings, so
 * each iteration is clamped to what fits in the early_memremap() window
 * (NR_FIX_BTMAPS pages) for both the source and the destination side.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		/* In-page offsets of the current dest/src positions. */
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;

		/* Clamp each side to what fits in the fixmap window. */
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);

		/* Map from the page boundary so the window math above holds. */
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);

		/*
		 * The mappings start at the page-aligned base, so step back
		 * in by the in-page offsets. Without the "+ dest_off" /
		 * "+ src_off" an unaligned copy would transfer the wrong
		 * bytes (cf. the chunked copy in acpi_table_upgrade(),
		 * which uses "dest_p + slop" for the same reason). For
		 * page-aligned callers both offsets are zero.
		 */
		memcpy(to + dest_off, from + src_off, len);

		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);

		n -= len;
		dest += len;
		src += len;
	}
}
/* Undo an early ACPI table mapping; tolerates a NULL map or zero size. */
void __init __acpi_unmap_table(char *map, unsigned long size)
{
	if (map && size)
		early_memunmap(map, size);
}
/*
 * Reserve the memory associated with the Memory Attributes configuration
 * table, if it exists.
 */
int __init efi_memattr_init(void)
{
	efi_memory_attributes_table_t *tbl;

	if (efi.mem_attr_table == EFI_INVALID_TABLE_ADDR)
		return 0;

	/* Map just the header; that is enough to size the whole table. */
	tbl = early_memremap(efi.mem_attr_table, sizeof(*tbl));
	if (tbl == NULL) {
		pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
		       efi.mem_attr_table);
		return -ENOMEM;
	}

	if (tbl->version > 1) {
		/* Unknown layout: warn and skip the reservation. */
		pr_warn("Unexpected EFI Memory Attributes table version %d\n",
			tbl->version);
	} else {
		/*
		 * NOTE(review): tbl_size is not declared in this function —
		 * presumably file-scope state consumed elsewhere; confirm.
		 */
		tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
		memblock_reserve(efi.mem_attr_table, tbl_size);
	}

	early_memunmap(tbl, sizeof(*tbl));
	return 0;
}
/*
 * A number of config table entries get remapped to virtual addresses
 * after entering EFI virtual mode. However, the kexec kernel requires
 * their physical addresses therefore we pass them via setup_data and
 * correct those entries to their respective physical addresses here.
 *
 * Currently only handles smbios which is necessary for some firmware
 * implementation.
 */
int __init efi_reuse_config(u64 tables, int nr_tables)
{
	int i, sz, ret = 0;
	void *p, *tablep;
	struct efi_setup_data *data;

	if (!efi_setup)
		return 0;

	if (!efi_enabled(EFI_64BIT))
		return 0;

	data = early_memremap(efi_setup, sizeof(*data));
	if (!data) {
		ret = -ENOMEM;
		goto out;
	}

	if (!data->smbios)
		goto out_memremap;

	sz = sizeof(efi_config_table_64_t);

	p = tablep = early_memremap(tables, nr_tables * sz);
	if (!p) {
		pr_err("Could not map Configuration table!\n");
		ret = -ENOMEM;
		goto out_memremap;
	}

	/*
	 * Walk exactly the nr_tables entries mapped above. Iterating
	 * efi.systab->nr_tables instead could run past the end of the
	 * early_memremap() mapping if the two counts ever disagree.
	 */
	for (i = 0; i < nr_tables; i++) {
		efi_guid_t guid;

		guid = ((efi_config_table_64_t *)p)->guid;

		/* Patch the SMBIOS entry back to its physical address. */
		if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID))
			((efi_config_table_64_t *)p)->table = data->smbios;

		p += sz;
	}
	early_memunmap(tablep, nr_tables * sz);
out_memremap:
	early_memunmap(data, sizeof(*data));
out:
	return ret;
}
/* Tear down an SFI mapping with the API that matches how it was created. */
static void __ref sfi_unmap_memory(void __iomem *virt, u32 size)
{
	if (virt == NULL || size == 0)
		return;

	if (sfi_use_memremap) {
		memunmap(virt);
		return;
	}

	early_memunmap(virt, size);
}
/*
 * Redirect the x86 platform hooks to Jailhouse-specific implementations
 * and fetch the cell configuration from the SETUP_JAILHOUSE setup_data
 * entry in the boot parameter chain.
 *
 * NOTE(review): this chunk is truncated — the function's closing brace
 * and any code after the setup_data scan are not visible here; the code
 * below is reproduced token-for-token as found.
 */
static void __init jailhouse_init_platform(void)
{
	u64 pa_data = boot_params.hdr.setup_data;
	struct setup_data header;
	void *mapping;

	/* Route init/timer/SMP/PCI setup through Jailhouse variants. */
	x86_init.irqs.pre_vector_init = x86_init_noop;
	x86_init.timers.timer_init = jailhouse_timer_init;
	x86_init.mpparse.get_smp_config = jailhouse_get_smp_config;
	x86_init.pci.arch_init = jailhouse_pci_arch_init;

	/* Both CPU and TSC calibration come from the same hypervisor hook. */
	x86_platform.calibrate_cpu = jailhouse_get_tsc;
	x86_platform.calibrate_tsc = jailhouse_get_tsc;
	x86_platform.get_wallclock = jailhouse_get_wallclock;

	/* Mark legacy PC hardware (RTC, warm reset, i8042) as absent. */
	x86_platform.legacy.rtc = 0;
	x86_platform.legacy.warm_reset = 0;
	x86_platform.legacy.i8042 = X86_LEGACY_I8042_PLATFORM_ABSENT;

	legacy_pic = &null_legacy_pic;
	machine_ops.emergency_restart = jailhouse_no_restart;

	/* Scan the setup_data chain for the Jailhouse configuration blob. */
	while (pa_data) {
		/* Map each entry's header just long enough to copy it out. */
		mapping = early_memremap(pa_data, sizeof(header));
		memcpy(&header, mapping, sizeof(header));
		early_memunmap(mapping, sizeof(header));

		if (header.type == SETUP_JAILHOUSE &&
		    header.len >= sizeof(setup_data)) {
			/* Skip the header; copy only the payload. */
			pa_data += offsetof(struct setup_data, data);

			mapping = early_memremap(pa_data, sizeof(setup_data));
			memcpy(&setup_data, mapping, sizeof(setup_data));
			early_memunmap(mapping, sizeof(setup_data));

			break;
		}

		pa_data = header.next;
	}
	/* NOTE(review): function body continues beyond this chunk. */
/*
 * Rebuild the EFI memory map with the user-requested fake memory ranges
 * folded in, then install the new map in place of the old one.
 */
void __init efi_fake_memmap(void)
{
	int nr_descs = efi.memmap.nr_map;
	unsigned long total;
	phys_addr_t phys;
	efi_memory_desc_t *md;
	void *virt;
	int i;

	if (!nr_fake_mem)
		return;

	/*
	 * Work out how many descriptors the new map needs: each fake
	 * range may split the existing descriptors it overlaps.
	 */
	for (i = 0; i < nr_fake_mem; i++) {
		struct range *r = &fake_mems[i].range;

		for_each_efi_memory_desc(md)
			nr_descs += efi_memmap_split_count(md, r);
	}

	total = efi.memmap.desc_size * nr_descs;

	/* Back the new map with a page-aligned memblock allocation. */
	phys = memblock_alloc(total, PAGE_SIZE);
	if (!phys)
		return;

	virt = early_memremap(phys, total);
	if (!virt) {
		/* Give the allocation back if we cannot map it. */
		memblock_free(phys, total);
		return;
	}

	/* Populate the new map by inserting each fake range in turn. */
	for (i = 0; i < nr_fake_mem; i++)
		efi_memmap_insert(&efi.memmap, virt, &fake_mems[i]);

	early_memunmap(virt, total);
	efi_memmap_install(phys, nr_descs);
	efi_print_memmap();
}
/*
 * Scan the initrd cpio archive under kernel/firmware/acpi/ for ACPI
 * table images, validate them, and copy them into a reserved low
 * physical memory area so they can later override the firmware tables.
 */
void __init acpi_table_upgrade(void)
{
	void *data = (void *)initrd_start;
	size_t size = initrd_end - initrd_start;
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;

	if (data == NULL || size == 0)
		return;

	/* Collect and sanity-check up to NR_ACPI_INITRD_TABLES tables. */
	for (no = 0; no < NR_ACPI_INITRD_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;

		/* Continue the next search after the file just found. */
		data += offset;
		size -= offset;

		if (file.size < sizeof(struct acpi_table_header)) {
			pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		table = file.data;

		/* Match the 4-byte signature against the known-table list. */
		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;

		if (!table_sigs[sig]) {
			pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (file.size != table->length) {
			pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (acpi_table_checksum(file.data, table->length)) {
			pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);

		all_tables_size += table->length;
		acpi_initrd_files[table_nr].data = file.data;
		acpi_initrd_files[table_nr].size = file.size;
		table_nr++;
	}

	if (table_nr == 0)
		return;

	/* Find a physical window low enough for the ACPI core to use. */
	acpi_tables_addr =
		memblock_find_in_range(0, ACPI_TABLE_UPGRADE_MAX_PHYS,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}
	/*
	 * Only calling e820_add_reserve does not work and the
	 * tables are invalid (memory got used) later.
	 * memblock_reserve works as expected and the tables won't get
	 * modified. But it's not enough on x86 because ioremap will
	 * complain later (used by acpi_os_map_memory) that the pages
	 * that should get mapped are not marked "reserved".
	 * Both memblock_reserve and e820_add_region (via
	 * arch_reserve_mem_area) together work fine.
	 */
	memblock_reserve(acpi_tables_addr, all_tables_size);
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

	/*
	 * early_ioremap only can remap 256k one time. If we map all
	 * tables one time, we will hit the limit. Need to map chunks
	 * one by one during copying the same as that in relocate_initrd().
	 */
	for (no = 0; no < table_nr; no++) {
		unsigned char *src_p = acpi_initrd_files[no].data;
		phys_addr_t size = acpi_initrd_files[no].size;
		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
		phys_addr_t slop, clen;
		char *dest_p;

		total_offset += size;

		/* Copy in chunks no larger than the early fixmap window. */
		while (size) {
			/* In-page offset of the current destination. */
			slop = dest_addr & ~PAGE_MASK;
			clen = size;
			if (clen > MAP_CHUNK_SIZE - slop)
				clen = MAP_CHUNK_SIZE - slop;
			/* Map from the page boundary, write at the offset. */
			dest_p = early_memremap(dest_addr & PAGE_MASK,
						clen + slop);
			memcpy(dest_p + slop, src_p, clen);
			early_memunmap(dest_p, clen + slop);
			src_p += clen;
			dest_addr += clen;
			size -= clen;
		}
	}
}