Example #1
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	int pgd_idx, i;
	int nr_pgds;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		write_cr3((unsigned long)save_pgd);
		__flush_tlb_all();
		return;
	}

	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
		pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
		set_pgd(pgd, save_pgd[pgd_idx]);

		if (!(pgd_val(*pgd) & _PAGE_PRESENT))
			continue;

		for (i = 0; i < PTRS_PER_P4D; i++) {
			p4d = p4d_offset(pgd,
					 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);

			if (!(p4d_val(*p4d) & _PAGE_PRESENT))
				continue;

			pud = (pud_t *)p4d_page_vaddr(*p4d);
			pud_free(&init_mm, pud);
		}

		p4d = (p4d_t *)pgd_page_vaddr(*pgd);
		p4d_free(&init_mm, p4d);
	}

	kfree(save_pgd);

	__flush_tlb_all();
	early_code_mapping_set_exec(0);
}
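This epilog frees the page-table pages and the save_pgd array that the matching efi_call_phys_prolog() allocated when it built the 1:1 alias for the EFI_OLD_MEMMAP path. A condensed sketch of that allocation side, based on the prolog variants shown later (Examples #18 and #21); the exact allocator call varied by kernel version, so treat the details as hedged:

pgd_t * __init efi_call_phys_prolog_sketch(void)
{
	/* Sketch of the EFI_OLD_MEMMAP allocation side; details hedged. */
	unsigned long vaddress;
	pgd_t *save_pgd;
	int pgd_idx, nr_pgds;

	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc_array(nr_pgds, sizeof(*save_pgd), GFP_KERNEL);

	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
		/*
		 * Remember the original entry, then alias the low range
		 * to the matching direct-mapping entry.
		 */
		save_pgd[pgd_idx] = *pgd_offset_k(pgd_idx * PGDIR_SIZE);
		vaddress = (unsigned long)__va(pgd_idx * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE),
			*pgd_offset_k(vaddress));
	}
	__flush_tlb_all();

	return save_pgd;
}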
Example #2
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}
Example #3
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
		leave_mm(cpu);
}
Example #4
static inline void do_flush_tlb_all_local(void)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
		leave_mm(cpu);
}
Example #5
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}
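Examples #2 through #5 are the same pattern across kernel generations, differing only in how the per-cpu TLB state is read: after flushing locally, a CPU that holds an mm only lazily leaves it entirely, so it stops receiving further flush IPIs for that mm. A minimal sketch of what leave_mm() does on x86 of that era; the accessor and field names (cpu_tlbstate, active_mm) are era-dependent assumptions:

void leave_mm(int cpu)
{
	/* Sketch: field/accessor names varied between kernel versions. */
	struct mm_struct *active_mm = per_cpu(cpu_tlbstate, cpu).active_mm;

	/* Only legal while lazy; an active user must keep its mm. */
	BUG_ON(per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK);

	/* Stop receiving flush IPIs for this mm ... */
	cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
	/* ... and switch to the kernel page tables, which never go away. */
	load_cr3(swapper_pg_dir);
}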
Example #6
static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, 0);

	spin_lock(&init_mm.page_table_lock);
	phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
}
Example #7
/*
 * Activate a secondary processor.
 */
notrace static void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#endif

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines the number of
	 * IPI recipients and the time when the determination is made
	 * as to which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in-progress
	 * smp_call_function().
	 *
	 * We need to hold vector_lock so that the set of online cpus
	 * does not change while we are assigning vectors to cpus.  Holding
	 * this lock ensures we don't half assign or remove an irq from a cpu.
	 */
	ipi_call_lock();
	lock_vector_lock();
	set_cpu_online(smp_processor_id(), true);
	unlock_vector_lock();
	ipi_call_unlock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_idle();
}
Example #8
/*
 * Activate a secondary processor.
 */
notrace static void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

	enable_start_cpu0 = 0;

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#endif

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	/*
	 * Enable the espfix hack for this CPU
	 */
#ifdef CONFIG_X86_ESPFIX64
	init_espfix_ap();
#endif

	/*
	 * We need to hold vector_lock so that the set of online cpus
	 * does not change while we are assigning vectors to cpus.  Holding
	 * this lock ensures we don't half assign or remove an irq from a cpu.
	 */
	lock_vector_lock();
	set_cpu_online(smp_processor_id(), true);
	unlock_vector_lock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_startup_entry(CPUHP_ONLINE);
}
Example #9
static void flush_kernel_map(void *dummy)
{
	/* Could use CLFLUSH here if the CPU supports it (Hammer, P4) */
	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
	/*
	 * Flush everything to work around an erratum in early Athlons
	 * regarding large-page flushing.
	 */
	__flush_tlb_all();
}
Example #10
void __init efi_call_phys_epilog(void)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	set_pgd(pgd_offset_k(0x0UL), save_pgd);
	__flush_tlb_all();
	local_irq_restore(efi_flags);
	early_code_mapping_set_exec(0);
}
Example #11
/*==========================================================================*
 * Name:         smp_flush_tlb_all
 *
 * Description:  This routine flushes all processes' TLBs.
 *               1. Request the other CPUs to execute 'flush_tlb_all_ipi()'.
 *               2. Execute 'do_flush_tlb_all_local()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_all(void)
{
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);
	__flush_tlb_all();
	local_irq_restore(flags);
	smp_call_function(flush_tlb_all_ipi, NULL, 1);
	preempt_enable();
}
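The 'flush_tlb_all_ipi()' handler named in the comment is not shown in this example; in the m32r tree it is a trivial wrapper around the local flush, sketched here from the surrounding code (treat it as an assumption):

static void flush_tlb_all_ipi(void *info)
{
	/* Runs on each remote CPU in IPI context: flush locally. */
	__flush_tlb_all();
}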
Example #12
void __init efi_call_phys_prelog(void)
{
	unsigned long vaddress;

	early_code_mapping_set_exec(1);
	local_irq_save(efi_flags);
	vaddress = (unsigned long)__va(0x0UL);
	save_pgd = *pgd_offset_k(0x0UL);
	set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
	__flush_tlb_all();
}
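Taken together, the prolog/epilog pair brackets each physical-mode EFI call: the prolog aliases virtual address 0 so the firmware's 1:1 expectations hold, and the epilog restores the saved pgd entry. Roughly how callers of that era used the pair; the marshalling helper (efi_call_phys() vs. efi_call_phys4()) varied by version, so this is a sketch rather than verbatim source:

static efi_status_t __init phys_efi_set_virtual_address_map(
	unsigned long memory_map_size, unsigned long descriptor_size,
	u32 descriptor_version, efi_memory_desc_t *virtual_map)
{
	efi_status_t status;

	efi_call_phys_prelog();		/* install the 1:1 alias, save pgd */
	status = efi_call_phys(efi_phys.set_virtual_address_map,
			       memory_map_size, descriptor_size,
			       descriptor_version, virtual_map);
	efi_call_phys_epilog();		/* restore the original page table */
	return status;
}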
Example #13
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

	load_cr3(save_pgd);
	__flush_tlb_all();
}
Example #14
void __init efi_call_phys_epilog(void)
{
	int pgd;
	int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	for (pgd = 0; pgd < n_pgds; pgd++)
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
	kfree(save_pgd);
	__flush_tlb_all();
	local_irq_restore(efi_flags);
	early_code_mapping_set_exec(0);
}
Example #15
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	/* the return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time.
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_all();
}
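kernel_map_pages() is the CONFIG_DEBUG_PAGEALLOC hook: the page allocator unmaps pages as they are freed and maps them back on allocation, so any stale access faults immediately. A hedged sketch of the two call sites; the wrapper names below are illustrative, since the real calls sit inline in the allocator:

/* Illustrative wrappers around the two DEBUG_PAGEALLOC call sites. */
static inline void debug_unmap_on_free(struct page *page, int order)
{
	kernel_map_pages(page, 1 << order, 0);	/* unmap: trap use-after-free */
}

static inline void debug_map_on_alloc(struct page *page, int order)
{
	kernel_map_pages(page, 1 << order, 1);	/* remap before handing out */
}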
Example #16
/*
 * Activate a secondary processor.
 */
static void notrace start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

	enable_start_cpu0 = 0;

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#endif

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	/*
	 * Lock vector_lock and initialize the vectors on this cpu
	 * before setting the cpu online. We must set it online with
	 * vector_lock held to prevent a concurrent setup/teardown
	 * from seeing a half valid vector space.
	 */
	lock_vector_lock();
	setup_vector_irq(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	unlock_vector_lock();
	cpu_set_state_online(smp_processor_id());
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_startup_entry(CPUHP_ONLINE);
}
Example #17
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	int pgd_idx;
	int nr_pgds;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		write_cr3((unsigned long)save_pgd);
		__flush_tlb_all();
		return;
	}

	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);

	kfree(save_pgd);

	__flush_tlb_all();
	early_code_mapping_set_exec(0);
}
Example #18
pgd_t * __init efi_call_phys_prolog(void)
{
	struct desc_ptr gdt_descr;
	pgd_t *save_pgd;

	/* Current pgd is swapper_pg_dir, we'll restore it later: */
	save_pgd = swapper_pg_dir;
	load_cr3(initial_page_table);
	__flush_tlb_all();

	gdt_descr.address = __pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

	return save_pgd;
}
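The pgd returned here is what the 32-bit epilog in Example #13 takes as its argument. A hypothetical call site showing how the two halves pair up:

/* Hypothetical pairing of Example #18 with Example #13: */
static void __init efi_do_phys_call_example(void)
{
	pgd_t *save_pgd = efi_call_phys_prolog();

	/* ... the physical-mode EFI call runs here ... */

	efi_call_phys_epilog(save_pgd);
}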
Example #19
void __init efi_call_phys_epilog(void)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

	clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();

	local_irq_restore(efi_rt_eflags);
}
Example #20
void __init efi_call_phys_epilog(void)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	int pgd;
	int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

	if (!efi_enabled(EFI_OLD_MEMMAP))
		return;

	for (pgd = 0; pgd < n_pgds; pgd++)
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
	kfree(save_pgd);
	__flush_tlb_all();
	local_irq_restore(efi_flags);
	early_code_mapping_set_exec(0);
}
Example #21
void __init efi_call_phys_prelog(void)
{
	unsigned long vaddress;
	int pgd;
	int n_pgds;

	early_code_mapping_set_exec(1);
	local_irq_save(efi_flags);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);

	for (pgd = 0; pgd < n_pgds; pgd++) {
		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
	}
	__flush_tlb_all();
}
Example #22
void __init efi_call_phys_epilog(void)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	int pgd;
	int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	for (pgd = 0; pgd < n_pgds; pgd++)
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
	kfree(save_pgd);

#ifdef CONFIG_PAX_PER_CPU_PGD
	load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
#endif

	__flush_tlb_all();
	local_irq_restore(efi_flags);
	early_code_mapping_set_exec(0);
}
Example #23
void __init efi_call_phys_prelog(void)
{
	struct desc_ptr gdt_descr;

	local_irq_save(efi_rt_eflags);

	clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();

	gdt_descr.address = __pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
Example #24
void efi_call_phys_prelog(void)
{
	unsigned long cr4;
	unsigned long temp;
	struct desc_ptr gdt_descr;

	local_irq_save(efi_rt_eflags);

	/*
	 * If I don't have PAE, I should just duplicate two entries in page
	 * directory. If I have PAE, I just need to duplicate one entry in
	 * page directory.
	 */
	cr4 = read_cr4();

	if (cr4 & X86_CR4_PAE) {
		efi_bak_pg_dir_pointer[0].pgd =
		    swapper_pg_dir[pgd_index(0)].pgd;
		swapper_pg_dir[0].pgd =
		    swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
	} else {
		efi_bak_pg_dir_pointer[0].pgd =
		    swapper_pg_dir[pgd_index(0)].pgd;
		efi_bak_pg_dir_pointer[1].pgd =
		    swapper_pg_dir[pgd_index(0x400000)].pgd;
		swapper_pg_dir[pgd_index(0)].pgd =
		    swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
		temp = PAGE_OFFSET + 0x400000;
		swapper_pg_dir[pgd_index(0x400000)].pgd =
		    swapper_pg_dir[pgd_index(temp)].pgd;
	}

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();

	gdt_descr.address = __pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
Example #25
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the 
   physical memory. To access them they are temporarily mapped. */
void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
{ 
	unsigned long next; 

	Dprintk("init_memory_mapping\n");

	/* 
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped.  Unfortunately this is done currently before the nodes are 
	 * discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		unsigned long pud_phys; 
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end) 
			next = end; 
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(pud);
	} 

	if (!after_bootmem)
		mmu_cr4_features = read_cr4();
	__flush_tlb_all();
}
Example #26
/*
 * Enables/disables executability of a given kernel page and
 * returns the previous setting.
 */
int __init set_kernel_exec(unsigned long vaddr, int enable)
{
	pte_t *pte;
	int ret = 1;

	if (!nx_enabled)
		goto out;

	pte = lookup_address(vaddr);
	BUG_ON(!pte);

	if (!pte_exec_kernel(*pte))
		ret = 0;

	if (enable)
		pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	else
		pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
	__flush_tlb_all();
out:
	return ret;
}
Example #27
void __init efi_call_phys_prolog(void)
{
	struct desc_ptr gdt_descr;

#ifdef CONFIG_PAX_KERNEXEC
	struct desc_struct d;
#endif

	local_irq_save(efi_rt_eflags);

	load_cr3(initial_page_table);
	__flush_tlb_all();

#ifdef CONFIG_PAX_KERNEXEC
	pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
	pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
#endif

	gdt_descr.address = __pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
Example #28
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk("NX (Execute Disable) protection: active\n");
#endif

	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}
Example #29
static void __init zap_identity_mappings(void)
{
	pgd_t *pgd = pgd_offset_k(0UL);
	pgd_clear(pgd);
	__flush_tlb_all();
}
Example #30
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As result in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share PGD with anything else.
	 * We claim whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry and it collides with
	 * bunch of things like kernel code, modules, EFI mapping, etc.
	 * We need to take extra steps to not overwrite them.
	 */
	if (pgtable_l5_enabled()) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down(
			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up(
			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);

	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_early_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
					(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_early_shadow_page has been used as early shadow memory, thus
	 * it may contain some garbage. Now we can clear and write protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;

		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
		set_pte(&kasan_early_shadow_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection is applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}