Example #1
static int set_up_temporary_mappings(void)
{
	unsigned long start, end, next;
	int error;

	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!temp_level4_pgt)
		return -ENOMEM;

	/* It is safe to reuse the original kernel mapping */
	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
		init_level4_pgt[pgd_index(__START_KERNEL_map)]);

	/* Set up the direct mapping from scratch */
	start = (unsigned long)pfn_to_kaddr(0);
	end = (unsigned long)pfn_to_kaddr(max_pfn);

	for (; start < end; start = next) {
		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!pud)
			return -ENOMEM;
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
			return error;
		set_pgd(temp_level4_pgt + pgd_index(start),
			mk_kernel_pgd(__pa(pud)));
	}
	return 0;
}
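
The loop above covers the direct-mapping range one PGD entry (PGDIR_SIZE bytes of virtual space) at a time and clamps the final step to end; example #12 below uses the same pattern but aligns addr down with PGDIR_MASK first, so an unaligned start still advances to the next pgd boundary. A minimal user-space sketch of that chunking, with the PGDIR_SIZE value assumed here purely for illustration (x86-64, 4-level paging):

#include <stdio.h>

#define PGDIR_SIZE	(1UL << 39)	/* assumed: one x86-64 4-level pgd entry spans 512 GiB */

int main(void)
{
	/* hypothetical PGDIR-aligned range, 2.5 pgd entries long */
	unsigned long start = 4 * PGDIR_SIZE;
	unsigned long end = start + 2 * PGDIR_SIZE + PGDIR_SIZE / 2;
	unsigned long next;

	for (; start < end; start = next) {
		next = start + PGDIR_SIZE;	/* one pgd slot per iteration */
		if (next > end)
			next = end;		/* clamp the last, partial chunk */
		printf("chunk: %#lx - %#lx\n", start, next);
	}
	return 0;
}
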
Example #2
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;
	pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
	pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
	pgprot_val(pgtable_prot)  &= __default_kernel_pte_mask;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

	if (pgtable_l5_enabled()) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
	if (p4d) {
		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

		set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	}

	return 0;
}
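
Every example on this page indexes page tables with pgd_index()/p4d_index()/pud_index()/pmd_index(), which simply extract 9-bit fields from the virtual address. Below is a standalone sketch of that decomposition, assuming the standard x86-64 4-level shift values; the constants and the sample address are illustrative, not taken from the files quoted here. With 5-level paging (the p4d branch above), a fifth 9-bit index is taken from bit 48 upward.

#include <stdio.h>

/* assumed x86-64 4-level paging constants */
#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PGDIR_SHIFT	39
#define PTRS_PER_TBL	512		/* 9 index bits per level */

static unsigned long tbl_index(unsigned long addr, unsigned int shift)
{
	return (addr >> shift) & (PTRS_PER_TBL - 1);
}

int main(void)
{
	unsigned long addr = 0xffffffff81000000UL;	/* hypothetical kernel-text address */

	printf("pgd %lu  pud %lu  pmd %lu  pte %lu\n",
	       tbl_index(addr, PGDIR_SHIFT), tbl_index(addr, PUD_SHIFT),
	       tbl_index(addr, PMD_SHIFT), tbl_index(addr, PAGE_SHIFT));
	return 0;
}
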
Example #3
/*H:435
 * And this is us, creating the new page directory.  If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir.
 */
static unsigned int new_pgdir(struct lg_cpu *cpu,
			      unsigned long gpgdir,
			      int *blank_pgdir)
{
	unsigned int next;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd_table;
#endif

	/*
	 * We pick one entry at random to throw out.  Choosing the Least
	 * Recently Used might be better, but this is easy.
	 */
	next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
	/* If it's never been allocated at all before, try now. */
	if (!cpu->lg->pgdirs[next].pgdir) {
		cpu->lg->pgdirs[next].pgdir =
					(pgd_t *)get_zeroed_page(GFP_KERNEL);
		/* If the allocation fails, just keep using the one we have */
		if (!cpu->lg->pgdirs[next].pgdir)
			next = cpu->cpu_pgd;
		else {
#ifdef CONFIG_X86_PAE
			/*
			 * In PAE mode, allocate a pmd page and populate the
			 * last pgd entry.
			 */
			pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
			if (!pmd_table) {
				free_page((long)cpu->lg->pgdirs[next].pgdir);
				set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0));
				next = cpu->cpu_pgd;
			} else {
				set_pgd(cpu->lg->pgdirs[next].pgdir +
					SWITCHER_PGD_INDEX,
					__pgd(__pa(pmd_table) | _PAGE_PRESENT));
				/*
				 * This is a blank page, so there are no kernel
				 * mappings: caller must map the stack!
				 */
				*blank_pgdir = 1;
			}
#else
			*blank_pgdir = 1;
#endif
		}
	}
	/* Record which Guest toplevel this shadows. */
	cpu->lg->pgdirs[next].gpgdir = gpgdir;
	/* Release all the non-kernel mappings. */
	flush_user_mappings(cpu->lg, next);

	return next;
}
Example #4
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

	pgd_test_and_unpin(pgd);

	if (PTRS_PER_PMD == 1 || !pgd)
		return pgd;

	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
		if (!pmd)
			goto out_oom;
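		/* "+ 1" sets _PAGE_PRESENT (bit 0): the PAE top-level entry is
		 * just the pmd page's physical address plus the present bit. */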
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}

	if (!HAVE_SHARED_KERNEL_PMD) {
		unsigned long flags;

		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
			if (!pmd)
				goto out_oom;
			set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
		}

		spin_lock_irqsave(&pgd_lock, flags);
		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
			pgd_t *kpgd = pgd_offset_k(v);
			pud_t *kpud = pud_offset(kpgd, v);
			pmd_t *kpmd = pmd_offset(kpud, v);
			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
			memcpy(pmd, kpmd, PAGE_SIZE);
			make_lowmem_page_readonly(
				pmd, XENFEAT_writable_page_tables);
		}
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}

	return pgd;

out_oom:
	for (i--; i >= 0; i--)
		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
}
Example #5
/*
 * Create PGD aligned trampoline table to allow real mode initialization
 * of additional CPUs. Consume only 1 low memory page.
 */
void __meminit init_trampoline(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	pud_t *pud_page, *pud_page_tramp;
	int i;

	if (!kaslr_memory_enabled()) {
		init_trampoline_default();
		return;
	}

	pud_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	pud_page = (pud_t *) pgd_page_vaddr(*pgd);

	for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud, *pud_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		pud_tramp = pud_page_tramp + pud_index(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		*pud_tramp = *pud;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}
Example #6
/*
 * set up paging
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0,};
	pte_t *ppte;
	int loop;

	/* main kernel space -> RAM mapping is handled as 1:1 transparent by
	 * the MMU */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
	memset(kernel_vmalloc_ptes, 0, sizeof(kernel_vmalloc_ptes));

	/* load the VMALLOC area PTE table addresses into the kernel PGD */
	ppte = kernel_vmalloc_ptes;
	for (loop = VMALLOC_START / (PAGE_SIZE * PTRS_PER_PTE);
	     loop < VMALLOC_END / (PAGE_SIZE * PTRS_PER_PTE);
	     loop++
	     ) {
		set_pgd(swapper_pg_dir + loop, __pgd(__pa(ppte) | _PAGE_TABLE));
		ppte += PAGE_SIZE / sizeof(pte_t);
	}

	/* declare the sizes of the RAM zones (only use the normal zone) */
	zones_size[ZONE_NORMAL] =
		contig_page_data.bdata->node_low_pfn -
		contig_page_data.bdata->node_min_pfn;

	/* pass the memory from the bootmem allocator to the main allocator */
	free_area_init(zones_size);

	__flush_tlb_all();
}
Example #7
static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	unsigned long flags = 1; /* 1 = CB0-1 device */

	DEBUG_IOREMAP(("shmedia_mapiopage pa %08x va %08x\n", pa, va));

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp)) {
		pmdp = alloc_bootmem_low_pages(PTRS_PER_PMD * sizeof(pmd_t));
		if (pmdp == NULL) panic("No memory for pmd\n");
		memset(pmdp, 0, PTRS_PER_PMD * sizeof(pmd_t));
		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pgdp, va);
	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
		if (ptep == NULL) panic("No memory for pte\n");
		clear_page((void *)ptep);
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	ptep = pte_offset(pmdp, va);
	set_pte(ptep, mk_pte_phys(pa, __pgprot(_PAGE_PRESENT |
			_PAGE_READ | _PAGE_WRITE |
			_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags)));
}
Example #8
static void init_low_mapping(void)
{
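	/* Temporarily mirror the pgd entry that maps PAGE_OFFSET at virtual
	 * address 0 for the ACPI suspend path; the displaced entry is saved in
	 * low_ptr and written back by acpi_restore_state_mem() (example #9). */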
	pgd_t *slot0 = pgd_offset(current->mm, 0UL);
	low_ptr = *slot0;
	set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
	flush_tlb_all();
}
Example #9
/*
 * acpi_restore_state
 */
void acpi_restore_state_mem(void)
{
#ifndef CONFIG_ACPI_PV_SLEEP
	set_pgd(pgd_offset(current->mm, 0UL), low_ptr);
	local_flush_tlb();
#endif
}
Example #10
File: pgtable.c Project: wesen/lemonix
pgd_t *pgd_alloc(struct mm_struct *mm)
{
    int i;
    pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

    if (PTRS_PER_PMD == 1 || !pgd)
        return pgd;

    for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
        pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
        if (!pmd)
            goto out_oom;
        paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
        set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
    }
    return pgd;

out_oom:
    for (i--; i >= 0; i--) {
        pgd_t pgdent = pgd[i];
        void* pmd = (void *)__va(pgd_val(pgdent)-1);
        paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
        kmem_cache_free(pmd_cache, pmd);
    }
    kmem_cache_free(pgd_cache, pgd);
    return NULL;
}
Example #11
pgd_t *pgd_alloc(struct mm_struct *mm)
{
    int i;
    pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

    if (PTRS_PER_PMD == 1 || !pgd)
        return pgd;

    for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
        pmd_t *pmd = pmd_cache_alloc(i);

        if (!pmd)
            goto out_oom;

        paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
        set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
    }
    return pgd;

out_oom:
    for (i--; i >= 0; i--) {
        pgd_t pgdent = pgd[i];
        void* pmd = (void *)__va(pgd_val(pgdent)-1);
        paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
        pmd_cache_free(pmd, i);
    }
    quicklist_free(0, pgd_dtor, pgd);
    return NULL;
}
Example #12
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
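	/* pstart/pend are physical addresses; info->offset shifts the virtual
	 * side, so an offset of 0 yields a pure 1:1 (identity) mapping and a
	 * non-zero offset maps the range at phys + offset instead. */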
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		pud_t *pud;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			pud = pud_offset(pgd, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;
			continue;
		}

		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}
Example #13
pgd_t * __init efi_call_phys_prolog(void)
{
	unsigned long vaddress;
	pgd_t *save_pgd;

	int pgd;
	int n_pgds;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		save_pgd = (pgd_t *)read_cr3();
		write_cr3((unsigned long)efi_scratch.efi_pgt);
		goto out;
	}

	early_code_mapping_set_exec(1);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
	if (!save_pgd)
		return NULL;

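	/*
	 * Every entry overwritten below is saved first; efi_call_phys_epilog()
	 * (example #28) writes the saved entries back and frees save_pgd.
	 */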
	for (pgd = 0; pgd < n_pgds; pgd++) {
		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
	}
out:
	__flush_tlb_all();

	return save_pgd;
}
Example #14
File: tlb.c Project: AlexShiLucky/linux
static void sync_current_stack_to_mm(struct mm_struct *mm)
{
	unsigned long sp = current_stack_pointer;
	pgd_t *pgd = pgd_offset(mm, sp);

	if (pgtable_l5_enabled()) {
		if (unlikely(pgd_none(*pgd))) {
			pgd_t *pgd_ref = pgd_offset_k(sp);

			set_pgd(pgd, *pgd_ref);
		}
	} else {
		/*
		 * "pgd" is faked.  The top level entries are "p4d"s, so sync
		 * the p4d.  This compiles to approximately the same code as
		 * the 5-level case.
		 */
		p4d_t *p4d = p4d_offset(pgd, sp);

		if (unlikely(p4d_none(*p4d))) {
			pgd_t *pgd_ref = pgd_offset_k(sp);
			p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);

			set_p4d(p4d, *p4d_ref);
		}
	}
}
Example #15
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */
	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | _KERNPG_TABLE));
	set_pgd(pgd + pgd_index(restore_jump_address),
		__pgd(__pa(pud) | _KERNPG_TABLE));

	return 0;
}
Example #16
static void __init kasan_early_p4d_populate(pgd_t *pgd,
		unsigned long addr,
		unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE |
					__pa_nodebug(kasan_early_shadow_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE |
					__pa_nodebug(kasan_early_shadow_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}
Example #17
static int __do_vmalloc_fault(unsigned long addr, struct mm_struct *mm)
{
	/* Synchronise this task's top level page-table
	 * with the 'reference' page table.
	 */
	int offset = __pgd_offset(addr);
	pgd_t *pgd, *pgd_k;
	pmd_t *pmd, *pmd_k;

	pgd_k = init_mm.pgd + offset;
	if (!pgd_present(*pgd_k))
		goto bad_area;

	pgd = mm->pgd + offset;
#if 0	/* note that we are two-level */
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);
#endif

	pmd_k = pmd_offset(pgd_k, addr);
	if (pmd_none(*pmd_k))
		goto bad_area;

	pmd = pmd_offset(pgd, addr);
	if (!pmd_none(*pmd))
		goto bad_area;
	set_pmd(pmd, *pmd_k);
	return 1;

bad_area:
	return -2;
}
Example #18
static void __init pagetable_init (void)
{
	unsigned long vaddr;
	pgd_t *pgd_base = swapper_pg_dir;

#ifdef CONFIG_X86_PAE
	int i;
	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pgd_base + i, __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif

	/* Enable PSE if available */
	if (cpu_has_pse) {
		set_in_cr4(X86_CR4_PSE);
	}

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, pgd_base);

	permanent_kmaps_init(pgd_base);

#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	set_pgd(&pgd_base[0], pgd_base[USER_PTRS_PER_PGD]);
#endif
}
Example #19
static void init_low_mapping(void)
{
	pgd_t *slot0 = pgd_offset(current->mm, 0UL);
	low_ptr = *slot0;
	set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
	WARN_ON(num_online_cpus() != 1);
	local_flush_tlb();
}
Example #20
static void init_low_mapping(pgd_t *pgd, int pgd_limit)
{
	int pgd_ofs = 0;

	while ((pgd_ofs < pgd_limit) && (pgd_ofs + USER_PTRS_PER_PGD < PTRS_PER_PGD)) {
		set_pgd(pgd, *(pgd+USER_PTRS_PER_PGD));
		pgd_ofs++, pgd++;
	}
}
Example #21
void __init efi_call_phys_epilog(void)
{
	/*
	 * After the lock is released, the original page table is restored.
	 */
	set_pgd(pgd_offset_k(0x0UL), save_pgd);
	__flush_tlb_all();
	local_irq_restore(efi_flags);
	early_code_mapping_set_exec(0);
}
Example #22
void __init efi_call_phys_prelog(void)
{
	unsigned long vaddress;

	early_code_mapping_set_exec(1);
	local_irq_save(efi_flags);
	vaddress = (unsigned long)__va(0x0UL);
	save_pgd = *pgd_offset_k(0x0UL);
	set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
	__flush_tlb_all();
}
Example #23
void zap_low_mappings (void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}
Example #24
File: efi_64.c Project: 3null/fastsocket
static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		pud_t *pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
		if (pud != pud_offset(pgd, 0))
			printk(KERN_ERR "EFI PAGETABLE BUG #00! %p <-> %p\n",
			       pud, pud_offset(pgd, 0));
	}
	return pud_offset(pgd, vaddr);
}
Example #25
static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
#ifdef CONFIG_X86_PAE
	int i;

	/* Init entries of the first-level page table to the zero page */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pg_dir + i,
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
#endif
}
Example #26
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used
	 * here because it's nop on 2,3-level pagetable setups
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}
Example #27
/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}
Example #28
void __init efi_call_phys_epilog(void)
{
	int pgd;
	int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	for (pgd = 0; pgd < n_pgds; pgd++)
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
	kfree(save_pgd);
	__flush_tlb_all();
	local_irq_restore(efi_flags);
	early_code_mapping_set_exec(0);
}
Example #29
/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgd, *pgd_new, *pgd_end;

	pgd = pgd_offset_k(KASAN_SHADOW_START);
	pgd_end = pgd_offset_k(KASAN_SHADOW_END);
	pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgd_new, *pgd);
	} while (pgd++, pgd_new++, pgd != pgd_end);
}
Example #30
File: kaslr.c Project: avagin/linux
static void __meminit init_trampoline_pud(void)
{
	pud_t *pud_page_tramp, *pud, *pud_tramp;
	p4d_t *p4d_page_tramp, *p4d, *p4d_tramp;
	unsigned long paddr, vaddr;
	pgd_t *pgd;

	pud_page_tramp = alloc_low_page();

	/*
	 * There are two mappings for the low 1MB area, the direct mapping
	 * and the 1:1 mapping for the real mode trampoline:
	 *
	 * Direct mapping: virt_addr = phys_addr + PAGE_OFFSET
	 * 1:1 mapping:    virt_addr = phys_addr
	 */
	paddr = 0;
	vaddr = (unsigned long)__va(paddr);
	pgd = pgd_offset_k(vaddr);

	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);

	pud_tramp = pud_page_tramp + pud_index(paddr);
	*pud_tramp = *pud;

	if (pgtable_l5_enabled()) {
		p4d_page_tramp = alloc_low_page();

		p4d_tramp = p4d_page_tramp + p4d_index(paddr);

		set_p4d(p4d_tramp,
			__p4d(_KERNPG_TABLE | __pa(pud_page_tramp)));

		set_pgd(&trampoline_pgd_entry,
			__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
	} else {
		set_pgd(&trampoline_pgd_entry,
			__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
	}
}