void __init init_espfix_bsp(void)
{
	pgd_t *pgd_p;
	pud_t *pud_p;
	unsigned long index = pgd_index(ESPFIX_BASE_ADDR);

	/* Install the espfix pud into the kernel page directory */
	pgd_p = &init_level4_pgt[index];
	pud_p = espfix_pud_page;
	paravirt_alloc_pud(&init_mm, __pa(pud_p) >> PAGE_SHIFT);
	set_pgd(pgd_p, __pgd(PGTABLE_PROT | __pa(pud_p)));

#ifdef CONFIG_PAX_PER_CPU_PGD
	clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
	clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
#endif

	/* Randomize the locations */
	init_espfix_random();

	/* The rest is the same as for any other processor */
	init_espfix_ap(0);
}
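Every listing here leans on clone_pgd_range() to copy top-level entries between page directories. For reference, the helper itself is a thin memcpy() over a range of pgd_t slots; a minimal sketch, mirroring the form it takes in the arch/x86 page table headers:

/*
 * Minimal sketch of clone_pgd_range(): copy 'count' top-level entries
 * from 'src' to 'dst'.  The caller is responsible for any locking and
 * for flushing the TLB afterwards if the destination table is live.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}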
void __init setup_trampoline_page_table(void)
{
#ifdef CONFIG_X86_32
	/* Copy kernel address range */
	clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	/* Initialize low mappings */
	clone_pgd_range(trampoline_pg_dir,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			min_t(unsigned long, KERNEL_PGD_PTRS,
			      KERNEL_PGD_BOUNDARY));
#endif
}
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD > 1) {
		/* PAE: Xen may need the pgd machine-contiguous and below 4GB */
		if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
			int rc = xen_create_contiguous_region(
				(unsigned long)pgd, 0, 32);
			BUG_ON(rc);
		}
		if (HAVE_SHARED_KERNEL_PMD)
			clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
					swapper_pg_dir + USER_PTRS_PER_PGD,
					KERNEL_PGD_PTRS);
	} else {
		/* !PAE: copy the kernel entries under the pgd lock */
		spin_lock_irqsave(&pgd_lock, flags);
		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}
static int efi_memory_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	switch (action) {
	case MEM_GOING_ONLINE:
		/* fall through */
	case MEM_GOING_OFFLINE:
		/* Resync the kernel mappings into the EFI page table */
		clone_pgd_range(efi_pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
void __init efi_call_phys_epilog(void)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

	/* Restore the original page table saved by the prolog */
	clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer,
			KERNEL_PGD_PTRS);

	/*
	 * The original page table is back in place; flush out any
	 * stale translations.
	 */
	__flush_tlb_all();

	local_irq_restore(efi_rt_eflags);
}
/* PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
	/* PAE, kernel PMD may be shared */
	if (SHARED_KERNEL_PMD) {
		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
	} else {
		unsigned long flags;

		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		spin_lock_irqsave(&pgd_lock, flags);
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}
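Either constructor variant is driven from pgd_alloc(). A minimal sketch of that wiring, assuming a direct page allocation; the real trees of this era variously used a slab cache or preallocated PMDs, so the allocation strategy here is illustrative only:

/*
 * Hedged sketch: driving pgd_ctor() from pgd_alloc().  The direct
 * __get_free_page() allocation is an assumption for illustration.
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	if (pgd)
		pgd_ctor(pgd);

	return pgd;
}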
void __init efi_pagetable_init(void)
{
	efi_memory_desc_t *md;
	unsigned long size;
	u64 start_pfn, end_pfn, pfn, vaddr;
	void *p;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	memset(efi_pgd, 0, sizeof(efi_pgd));
	dmi_check_system(efi_quirk_table);

	/* Map every EFI runtime region into the EFI page table */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (!(md->attribute & EFI_MEMORY_RUNTIME))
			continue;

		start_pfn = md->phys_addr >> PAGE_SHIFT;
		size = md->num_pages << EFI_PAGE_SHIFT;
		end_pfn = PFN_UP(md->phys_addr + size);

		for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
			unsigned long val;

			vaddr = pfn << PAGE_SHIFT;
			pgd = efi_pgd + pgd_index(vaddr);
			pud = fill_pud(pgd, vaddr);
			pmd = fill_pmd(pud, vaddr);
			pte = fill_pte(pmd, vaddr);

			/* Runtime code is executable; everything else is not */
			if (md->type == EFI_RUNTIME_SERVICES_CODE)
				val = __PAGE_KERNEL_EXEC;
			else
				val = __PAGE_KERNEL;
			if (!(md->attribute & EFI_MEMORY_WB))
				val |= _PAGE_CACHE_UC_MINUS;

			set_pte(pte, pfn_pte(pfn, __pgprot(val)));
		}
	}

	/* Keep the kernel half of the EFI page table in sync */
	clone_pgd_range(efi_pgd + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	hotplug_memory_notifier(efi_memory_callback, EFI_CALLBACK_PRI);
}
void __init efi_call_phys_prelog(void)
{
	struct desc_ptr gdt_descr;

	local_irq_save(efi_rt_eflags);

	/* Save the current pgd, then alias the kernel range at address 0 */
	clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir,
			KERNEL_PGD_PTRS);
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			min_t(unsigned long, KERNEL_PGD_PTRS,
			      KERNEL_PGD_BOUNDARY));

	/* The low identity mappings are in place; flush stale translations */
	__flush_tlb_all();

	gdt_descr.address = __pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
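The prolog/epilog pair brackets EFI runtime services that must run with the physical-address view of memory. A hedged sketch of a call site, following the 32-bit EFI code of this era (efi_call_phys2() and the efi_phys function-pointer table); the real caller also serializes on rtc_lock, which is omitted here:

/*
 * Hedged sketch: wrap a physical-mode firmware call with the
 * prolog/epilog pair.  efi_call_phys2() and efi_phys.get_time are
 * the names used by the old 32-bit EFI support.
 */
static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
					     efi_time_cap_t *tc)
{
	efi_status_t status;

	efi_call_phys_prelog();
	status = efi_call_phys2(efi_phys.get_time,
				virt_to_phys(tm), virt_to_phys(tc));
	efi_call_phys_epilog();

	return status;
}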
/* Non-PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
	unsigned long flags;

	/* !PAE, no pagetable sharing */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* must happen under lock */
	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			KERNEL_PGD_PTRS);
	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
				__pa(swapper_pg_dir) >> PAGE_SHIFT,
				USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
static void pgd_ctor(pgd_t *pgd)
{
	unsigned long flags;

	memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
	spin_lock_irqsave(&pgd_lock, flags);

#ifndef __tilegx__
	/*
	 * Check that the user interrupt vector has no L2.
	 * It never should for the swapper, and new page tables
	 * should always start with an empty user interrupt vector.
	 */
	BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif

	clone_pgd_range(pgd + KERNEL_PGD_INDEX_START,
			swapper_pg_dir + KERNEL_PGD_INDEX_START,
			KERNEL_PGD_PTRS);

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1) {
		/* !PAE: zero the user entries and take the pgd lock */
		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		spin_lock_irqsave(&pgd_lock, flags);
	}

	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			KERNEL_PGD_PTRS);

	/* PAE: the kernel PMD is shared, nothing more to do */
	if (PTRS_PER_PMD > 1)
		return;

	/* must happen under lock */
	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
				__pa(swapper_pg_dir) >> PAGE_SHIFT,
				USER_PTRS_PER_PGD,
				PTRS_PER_PGD - USER_PTRS_PER_PGD);

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
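The three-argument constructors above match the old kmem_cache_create() ctor signature. A hedged sketch of how such a constructor was registered in kernels of that vintage, in the style of the old i386 pgtable_cache_init(); the exact flags and the pgd_dtor pairing are assumptions from that era:

/*
 * Hedged sketch: register pgd_ctor() with the slab allocator.  The
 * destructor is only needed when the kernel PMD is not shared (!PAE
 * here, i.e. PTRS_PER_PMD == 1).
 */
void __init pgtable_cache_init(void)
{
	pgd_cache = kmem_cache_create("pgd",
				      PTRS_PER_PGD * sizeof(pgd_t),
				      PTRS_PER_PGD * sizeof(pgd_t),
				      0,
				      pgd_ctor,
				      PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): cannot create pgd cache");
}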