void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are about to set up the real shadow memory. First we must
	 * unmap the early shadow (the clear_pgds() call below), but
	 * instrumented code cannot execute without shadow memory, so
	 * tmp_pg_dir keeps the early shadow mapped until the full
	 * shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	/* Map real (writable) shadow for the kernel image. */
	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

	/* Back the rest of the shadow address space with the zero page. */
	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)mod_shadow_start);
	kasan_populate_early_shadow((void *)kimg_shadow_end,
				    kasan_mem_to_shadow((void *)PAGE_OFFSET));

	/* The shadow gap between modules and the image is zero shadow too. */
	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_early_shadow((void *)mod_shadow_end,
					    (void *)kimg_shadow_start);

	/* Map real shadow for all memory regions known to memblock. */
	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KAsan may reuse the contents of kasan_early_shadow_pte directly,
	 * so we must make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/* At this point kasan is fully initialized. Enable error messages. */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}
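For reference, the kasan_mem_to_shadow() translation used throughout this example is a plain shift-plus-offset, along the lines of the generic helper in include/linux/kasan.h (a sketch; the exact constants depend on the configuration):

static inline void *kasan_mem_to_shadow(const void *addr)
{
	/* Each shadow byte tracks 2^KASAN_SHADOW_SCALE_SHIFT (= 8) bytes. */
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

With a scale shift of 3, one shadow byte describes eight bytes of real memory, which is why the shadow start and end above are derived directly from _text and _end.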
Example #2
static void __apply_alternatives(void *alt_region, bool use_linear_alias)
{
	struct alt_instr *alt;
	struct alt_region *region = alt_region;
	__le32 *origptr, *replptr, *updptr;

	for (alt = region->begin; alt < region->end; alt++) {
		u32 insn;
		int i, nr_inst;

		/* Skip alternatives whose CPU capability is not present. */
		if (!cpus_have_cap(alt->cpufeature))
			continue;

		/* Only a 1:1 replacement of the sequence is supported. */
		BUG_ON(alt->alt_len != alt->orig_len);

		pr_info_once("patching kernel code\n");

		origptr = ALT_ORIG_PTR(alt);
		replptr = ALT_REPL_PTR(alt);
		/* Patch via the linear alias when the text map is read-only. */
		updptr = use_linear_alias ? lm_alias(origptr) : origptr;
		nr_inst = alt->alt_len / sizeof(insn);

		/* Rewrite the original instructions one word at a time. */
		for (i = 0; i < nr_inst; i++) {
			insn = get_alt_insn(alt, origptr + i, replptr + i);
			updptr[i] = cpu_to_le32(insn);
		}

		/* Make the patched range visible to instruction fetch. */
		flush_icache_range((uintptr_t)origptr,
				   (uintptr_t)(origptr + nr_inst));
	}
}
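For context, the entries this function walks are self-relative records emitted by the kernel's alternative-instruction macros. A sketch of the data layout, modeled on arm64's <asm/alternative.h> (shown for illustration; details vary across kernel versions):

#include <linux/types.h>	/* s32, u16, u8 */

struct alt_instr {
	s32 orig_offset;	/* offset to original instruction(s) */
	s32 alt_offset;		/* offset to replacement instruction(s) */
	u16 cpufeature;		/* cpu capability bit enabling this entry */
	u8  orig_len;		/* size of the original sequence, in bytes */
	u8  alt_len;		/* size of the replacement, in bytes */
};

struct alt_region {
	struct alt_instr *begin;
	struct alt_instr *end;
};

/* Self-relative offsets make the records position-independent. */
#define __ALT_PTR(a, f)		((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)

Because orig_offset and alt_offset are stored relative to their own location, the table needs no relocation, and ALT_ORIG_PTR()/ALT_REPL_PTR() resolve absolute addresses with simple pointer arithmetic.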
Example #3
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
	kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are about to set up the real shadow memory. First we must
	 * unmap the early shadow (the clear_pgds() call below), but
	 * instrumented code cannot execute without shadow memory, so
	 * tmp_pg_dir keeps the early shadow mapped until the full
	 * shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
			 pfn_to_nid(virt_to_pfn(_text)));

	/*
	 * vmemmap_populate() has populated the shadow region that covers the
	 * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
	 * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
	 * kasan_populate_zero_shadow() from replacing the page table entries
	 * (PMD or PTE) at the edges of the shadow region for the kernel
	 * image.
	 */
	kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
	kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);

	/* Map real shadow for all memory regions known to memblock. */
	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * The end + 1 here is intentional: KASAN reads several
		 * shadow bytes ahead of an access to speed up the fast
		 * path, and in rare cases those reads can cross the end
		 * of the mapped shadow region, so we map a little extra
		 * here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				(unsigned long)kasan_mem_to_shadow(end) + 1,
				pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KAsan may reuse the contents of kasan_zero_pte directly, so we
	 * must make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_zero_pte[i],
			pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));

	memset(kasan_zero_page, 0, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/* At this point kasan is fully initialized. Enable error messages. */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}
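To make the SWAPPER_BLOCK_SIZE rounding above concrete, here is a small standalone sketch of the shadow arithmetic. The scale shift of 3 matches generic KASAN; the 2 MiB block size assumes a 4K-page kernel, and the offset and addresses are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the kernel derives these from the VA layout. */
#define KASAN_SHADOW_SCALE_SHIFT 3           /* 1 shadow byte per 8 bytes */
#define KASAN_SHADOW_OFFSET      0xdfff200000000000UL
#define SWAPPER_BLOCK_SIZE       (2UL << 20) /* 2 MiB sections, 4K pages */

static uint64_t mem_to_shadow(uint64_t addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	uint64_t text = 0xffff000008081000UL;	/* hypothetical _text */
	uint64_t end  = 0xffff000009170000UL;	/* hypothetical _end  */
	uint64_t s = mem_to_shadow(text), e = mem_to_shadow(end);

	/*
	 * Round outward to whole SWAPPER_BLOCK_SIZE blocks, as kasan_init()
	 * does, so populating zero shadow next to the image cannot replace
	 * the block mappings at the edges of the kernel-image shadow.
	 */
	s &= ~(SWAPPER_BLOCK_SIZE - 1);			/* round_down */
	e = (e + SWAPPER_BLOCK_SIZE - 1)
		& ~(SWAPPER_BLOCK_SIZE - 1);		/* round_up   */

	printf("image shadow: [%#llx, %#llx)\n",
	       (unsigned long long)s, (unsigned long long)e);
	return 0;
}

Rounding the image shadow outward to whole blocks is what keeps kasan_populate_zero_shadow() from splitting the PMD/PTE entries that vmemmap_populate() created at the edges of that region.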