/* Locate the microcode container in the initrd via its cpio path. */
static struct cpio_data __init find_ucode_in_initrd(void)
{
	long offset = 0;
	char *path;
	void *start;
	size_t size;

#ifdef CONFIG_X86_32
	struct boot_params *p;

	/*
	 * On 32-bit, early load occurs before paging is turned on so we need
	 * to use physical addresses.
	 */
	p     = (struct boot_params *)__pa_nodebug(&boot_params);
	path  = (char *)__pa_nodebug(ucode_path);
	start = (void *)p->hdr.ramdisk_image;
	size  = p->hdr.ramdisk_size;
#else
	path  = ucode_path;
	start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
	size  = boot_params.hdr.ramdisk_size;
#endif

	return find_cpio_data(path, start, size, &offset);
}
/* Load microcode on the boot CPU (BSP) during early boot. */
void __init load_ucode_amd_bsp(unsigned int family)
{
	struct cpio_data cp;
	void **data;
	size_t *size;

#ifdef CONFIG_X86_32
	data = (void **)__pa_nodebug(&ucode_cpio.data);
	size = (size_t *)__pa_nodebug(&ucode_cpio.size);
#else
	data = &ucode_cpio.data;
	size = &ucode_cpio.size;
#endif

	cp = find_ucode_in_initrd();
	if (!cp.data) {
		if (!load_builtin_amd_microcode(&cp, family))
			return;
	}

	*data = cp.data;
	*size = cp.size;

	apply_ucode_in_initrd(cp.data, cp.size, true);
}
/* Populate the early KASAN shadow mapping at the pgd/p4d level for [addr, end). */
static void __init kasan_early_p4d_populate(pgd_t *pgd,
					    unsigned long addr,
					    unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE |
					__pa_nodebug(kasan_early_shadow_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE |
					__pa_nodebug(kasan_early_shadow_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
				__PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;

	/* Mask out unsupported __PAGE_KERNEL bits: */
	pte_val &= __default_kernel_pte_mask;
	pmd_val &= __default_kernel_pte_mask;
	pud_val &= __default_kernel_pte_mask;
	p4d_val &= __default_kernel_pte_mask;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_early_shadow_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_early_shadow_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_early_shadow_pud[i] = __pud(pud_val);

	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
		kasan_early_shadow_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}
/*
 * On the APs, check whether the microcode loader has been disabled. On
 * 32-bit this runs before paging is enabled, so dis_ucode_ldr has to be
 * read through its physical address.
 */
static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
	return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
	return dis_ucode_ldr;
#endif
}
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	kasan_map_early_shadow(early_level4_pgt);
	kasan_map_early_shadow(init_level4_pgt);
}
/*
 * On the BSP, check the kernel command line for "dis_ucode_ldr" and cache the
 * result. On 32-bit, both the command line and the flag are accessed through
 * physical addresses because paging is not yet enabled.
 */
static bool __init check_loader_disabled_bsp(void)
{
#ifdef CONFIG_X86_32
	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
	const char *opt	    = "dis_ucode_ldr";
	const char *option  = (const char *)__pa_nodebug(opt);
	bool *res	    = (bool *)__pa_nodebug(&dis_ucode_ldr);
#else /* CONFIG_X86_64 */
	const char *cmdline = boot_command_line;
	const char *option  = "dis_ucode_ldr";
	bool *res	    = &dis_ucode_ldr;
#endif

	if (cmdline_find_option_bool(cmdline, option))
		*res = true;

	return *res;
}
/* Create a new PMD entry */
int __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	unsigned long i;
	pgdval_t pgd, *pgd_p;
	pudval_t pud, *pud_p;
	pmdval_t pmd, *pmd_p;

	/* Invalid address or early pgt is done ? */
	if (physaddr >= MAXMEM || read_cr3() != __pa_nodebug(early_level4_pgt))
		return -1;

again:
	pgd_p = &early_level4_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __early_va rather than __va here is critical:
	 * __va would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (pgd)
		pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		for (i = 0; i < PTRS_PER_PUD; i++)
			pud_p[i] = 0;
		*pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		for (i = 0; i < PTRS_PER_PMD; i++)
			pmd_p[i] = 0;
		*pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
	}
	pmd = (physaddr & PMD_MASK) + early_pmd_flags;
	pmd_p[pmd_index(address)] = pmd;

	return 0;
}
/*
 * On 32-bit, since AP's early load occurs before paging is turned on, we
 * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
 * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
 * which is used upon resume from suspend.
 */
void load_ucode_amd_ap(void)
{
	struct microcode_amd *mc;
	size_t *usize;
	void **ucode;

	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
		__apply_microcode_amd(mc);
		return;
	}

	ucode = (void *)__pa_nodebug(&container);
	usize = (size_t *)__pa_nodebug(&container_size);

	if (!*ucode || !*usize)
		return;

	apply_ucode_in_initrd(*ucode, *usize, false);
}
/*
 * Return the p4d entry covering @addr. Without 5-level paging the p4d level
 * is folded into the pgd, so the pgd pointer itself can be used directly.
 */
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;

	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}
/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	unsigned long i;

	for (i = 0; i < PTRS_PER_PGD-1; i++)
		early_level4_pgt[i].pgd = 0;

	next_early_pgt = 0;

	write_cr3(__pa_nodebug(early_level4_pgt));
}
/* Point every PGD entry of the KASAN shadow range at the zero PUD. */
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE);
		start += PGDIR_SIZE;
	}
}
/* Create a new PMD entry */
int __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pgdval_t pgd, *pgd_p;
	pudval_t pud, *pud_p;
	pmdval_t pmd, *pmd_p;

	/* Invalid address or early pgt is done ? */
	if (physaddr >= MAXMEM || read_cr3() != __pa_nodebug(early_level4_pgt))
		return -1;

again:
	pgd_p = &early_level4_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (pgd)
		pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
		*pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pmd = (physaddr & PMD_MASK) + early_pmd_flags;
	pmd_p[pmd_index(address)] = pmd;

	return 0;
}
/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 */
static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
{
	struct equiv_cpu_entry *eq;
	size_t *cont_sz;
	u32 *header;
	u8 *data, **cont;
	u8 (*patch)[PATCH_MAX_SIZE];
	u16 eq_id = 0;
	int offset, left;
	u32 rev, eax, ebx, ecx, edx;
	u32 *new_rev;

#ifdef CONFIG_X86_32
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	cont_sz = (size_t *)__pa_nodebug(&container_size);
	cont	= (u8 **)__pa_nodebug(&container);
	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	cont_sz = &container_size;
	cont	= &container;
	patch	= &amd_ucode_patch;
#endif

	data   = ucode;
	left   = size;
	header = (u32 *)data;

	/* find equiv cpu table */
	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)				   /* size */
		return;

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	while (left > 0) {
		eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);

		*cont = data;

		/* Advance past the container header */
		offset = header[2] + CONTAINER_HDR_SZ;
		data  += offset;
		left  -= offset;

		eq_id = find_equiv_id(eq, eax);
		if (eq_id) {
			this_equiv_id = eq_id;
			*cont_sz = compute_container_size(*cont, left + offset);

			/*
			 * truncate how much we need to iterate over in the
			 * ucode update loop below
			 */
			left = *cont_sz - offset;
			break;
		}

		/*
		 * support multiple container files appended together. if this
		 * one does not have a matching equivalent cpu entry, we fast
		 * forward to the next container file.
		 */
		while (left > 0) {
			header = (u32 *)data;
			if (header[0] == UCODE_MAGIC &&
			    header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
				break;

			offset = header[1] + SECTION_HDR_SIZE;
			data  += offset;
			left  -= offset;
		}

		/* mark where the next microcode container file starts */
		offset = data - (u8 *)ucode;
		ucode  = data;
	}

	if (!eq_id) {
		*cont = NULL;
		*cont_sz = 0;
		return;
	}

	/* find ucode and update if needed */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);

	while (left > 0) {
		struct microcode_amd *mc;

		header = (u32 *)data;
		if (header[0] != UCODE_UCODE_TYPE || /* type */
		    header[1] == 0)		     /* size */
			break;

		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);

		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {
			if (!__apply_microcode_amd(mc)) {
				rev = mc->hdr.patch_id;
				*new_rev = rev;

				if (save_patch)
					memcpy(patch, mc,
					       min_t(u32, header[1], PATCH_MAX_SIZE));
			}
		}

		offset = header[1] + SECTION_HDR_SIZE;
		data  += offset;
		left  -= offset;
	}
}
/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	memset(early_level4_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
	next_early_pgt = 0;
	write_cr3(__pa_nodebug(early_level4_pgt));
}
				t->ax, t->bx, t->cx, t->dx);
			printk(KERN_EMERG "esi = %08lx, edi = %08lx\n",
				t->si, t->di);
		}
	}

	for (;;)
		cpu_relax();
}

struct tss_struct doublefault_tss __cacheline_aligned = {
	.x86_tss = {
		.sp0		= STACK_START,
		.ss0		= __KERNEL_DS,
		.ldt		= 0,
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,

		.ip		= (unsigned long) doublefault_fn,
		/* 0x2 bit is always set */
		.flags		= X86_EFLAGS_SF | 0x2,
		.sp		= STACK_START,
		.es		= __KERNEL_DS,
		.cs		= __KERNEL_CS,
		.ss		= __KERNEL_DS,
		.ds		= __KERNEL_DS,
		.fs		= __KERNEL_PERCPU,

		.__cr3		= __pa_nodebug(swapper_pg_dir),
	}
};