void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
	load_cr3(early_level4_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_X_MAX; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}

	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));

	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_level4_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;

	pr_info("KernelAddressSanitizer initialized\n");
}
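All of the population calls above funnel through the same address translation: one shadow byte tracks an 8-byte granule, so a shadow address is a shift plus a fixed offset. For reference, a minimal sketch of that helper, mirroring the generic definition in include/linux/kasan.h (where KASAN_SHADOW_SCALE_SHIFT is 3 for generic KASAN):

static inline void *kasan_mem_to_shadow(const void *addr)
{
	/* Each shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT = 8 bytes. */
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}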
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * At first we should unmap early shadow (clear_pgds() call below).
	 * However, instrumented code couldn't execute without shadow memory.
	 * tmp_pg_dir used to keep early shadow mapped until full shadow
	 * setup will be finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)mod_shadow_start);
	kasan_populate_early_shadow((void *)kimg_shadow_end,
				    kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_early_shadow((void *)mod_shadow_end,
					    (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}
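The arm64 code above relies on clear_pgds() to drop the early shadow mappings once tmp_pg_dir is active. A minimal sketch of that helper, assuming the PGD-granular variant used by the arm64 port (the real helper lives in arch/arm64/mm/kasan_init.c):

static void __init clear_pgds(unsigned long start, unsigned long end)
{
	/*
	 * Remove references to the early shadow page tables from
	 * swapper_pg_dir. pgd_clear() is not used because it is a no-op
	 * on configurations with folded page-table levels, so the entries
	 * are overwritten with zero PGDs instead.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}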
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
	kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * At first we should unmap early shadow (clear_pgds() call below).
	 * However, instrumented code couldn't execute without shadow memory.
	 * tmp_pg_dir used to keep early shadow mapped until full shadow
	 * setup will be finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
			 pfn_to_nid(virt_to_pfn(_text)));

	/*
	 * vmemmap_populate() has populated the shadow region that covers the
	 * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
	 * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to
	 * prevent kasan_populate_zero_shadow() from replacing the page table
	 * entries (PMD or PTE) at the edges of the shadow region for the
	 * kernel image.
	 */
	kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
	kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * end + 1 here is intentional. We check several shadow bytes
		 * in advance to slightly speed up fastpath. In some rare
		 * cases we could cross boundary of mapped shadow, so we just
		 * map some more here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				 (unsigned long)kasan_mem_to_shadow(end) + 1,
				 pfn_to_nid(virt_to_pfn(start)));
	}
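The SWAPPER_BLOCK_SIZE rounding above only works because round_down()/round_up() snap to power-of-two boundaries. For reference, the generic kernel macros behave roughly as follows (as defined in include/linux/kernel.h in kernels of this vintage):

/* Mask of low bits for a power-of-two alignment y. */
#define __round_mask(x, y)	((__typeof__(x))((y) - 1))
/* Round x up/down to the nearest multiple of y (y must be a power of two). */
#define round_up(x, y)		((((x) - 1) | __round_mask(x, y)) + 1)
#define round_down(x, y)	((x) & ~__round_mask(x, y))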
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As result in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share PGD with anything else.
	 * We claim whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry and it collides with
	 * bunch of things like kernel code, modules, EFI mapping, etc.
	 * We need to take extra steps to not overwrite them.
	 */
	if (pgtable_l5_enabled()) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down(
			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up(
			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);

	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_early_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_early_shadow_page has been used as early shadow memory, thus
	 * it may contain some garbage. Now we can clear and write protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;

		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
		set_pte(&kasan_early_shadow_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;

	pr_info("KernelAddressSanitizer initialized\n");
}
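Once any of these kasan_init() variants finishes, every kernel address has backing shadow and the compiler-inserted checks can run unconditionally. The following is an illustrative sketch only (the helper name is invented for this example; real checks are emitted inline by the compiler or handled by the KASAN runtime in mm/kasan/), showing roughly what a check for a naturally aligned 8-byte load amounts to:

/*
 * Sketch only: a zero shadow byte means the whole 8-byte granule is
 * addressable; any other value would be reported as a KASAN bug.
 */
static inline bool sketch_check_load_8(const void *addr)
{
	s8 shadow = *(s8 *)kasan_mem_to_shadow(addr);

	return shadow == 0;
}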