void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pgtable_t pte;

        if (!pgd_base)
                return;

        pgd = pgd_base + pgd_index(0);
        if (pgd_none_or_clear_bad(pgd))
                goto no_pgd;

        pud = pud_offset(pgd, 0);
        if (pud_none_or_clear_bad(pud))
                goto no_pud;

        pmd = pmd_offset(pud, 0);
        if (pmd_none_or_clear_bad(pmd))
                goto no_pmd;

        pte = pmd_pgtable(*pmd);
        pmd_clear(pmd);
        pte_free(mm, pte);
no_pmd:
        pud_clear(pud);
        pmd_free(mm, pmd);
no_pud:
        pgd_clear(pgd);
        pud_free(mm, pud);
no_pgd:
#if defined(CONFIG_SYNO_ARMADA_ARCH)
#ifdef CONFIG_ARM_LPAE
        /*
         * Free modules/pkmap or identity pmd tables.
         */
        for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                if (pgd_val(*pgd) & L_PGD_SWAPPER)
                        continue;
                pud = pud_offset(pgd, 0);
                if (pud_none_or_clear_bad(pud))
                        continue;
                pmd = pmd_offset(pud, 0);
                pud_clear(pud);
                pmd_free(mm, pmd);
                pgd_clear(pgd);
                pud_free(mm, pud);
        }
#endif
        __pgd_free(pgd_base);
#elif defined(CONFIG_SYNO_COMCERTO)
        free_pages((unsigned long) pgd_base, get_order(16384));
#else
        free_pages((unsigned long) pgd_base, 2);
#endif
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pgtable_t pte;

        if (!pgd_base)
                return;

        pgd = pgd_base + pgd_index(0);
        if (pgd_none_or_clear_bad(pgd))
                goto no_pgd;

        pud = pud_offset(pgd, 0);
        if (pud_none_or_clear_bad(pud))
                goto no_pud;

        pmd = pmd_offset(pud, 0);
        if (pmd_none_or_clear_bad(pmd))
                goto no_pmd;

        pte = pmd_pgtable(*pmd);
        pmd_clear(pmd);
        pte_free(mm, pte);
        mm_dec_nr_ptes(mm);
no_pmd:
        pud_clear(pud);
        pmd_free(mm, pmd);
        mm_dec_nr_pmds(mm);
no_pud:
        pgd_clear(pgd);
        pud_free(mm, pud);
no_pgd:
#ifdef CONFIG_ARM_LPAE
        /*
         * Free modules/pkmap or identity pmd tables.
         */
        for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                if (pgd_val(*pgd) & L_PGD_SWAPPER)
                        continue;
                pud = pud_offset(pgd, 0);
                if (pud_none_or_clear_bad(pud))
                        continue;
                pmd = pmd_offset(pud, 0);
                pud_clear(pud);
                pmd_free(mm, pmd);
                mm_dec_nr_pmds(mm);
                pgd_clear(pgd);
                pud_free(mm, pud);
        }
#endif
        __pgd_free(pgd_base);
}
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
                         unsigned long kernel)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset(mm, proc);
        pud = pud_alloc(mm, pgd, proc);
        if (!pud)
                goto out;

        pmd = pmd_alloc(mm, pud, proc);
        if (!pmd)
                goto out_pmd;

        pte = pte_alloc_map(mm, NULL, pmd, proc);
        if (!pte)
                goto out_pte;

        *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
        *pte = pte_mkread(*pte);
        return 0;

out_pte:
        pmd_free(mm, pmd);
out_pmd:
        pud_free(mm, pud);
out:
        return -ENOMEM;
}
static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
                                   unsigned long end_gpa)
{
        pud_t *pud;
        unsigned long end = ~0ul;
        int i_min = pgd_index(start_gpa);
        int i_max = pgd_index(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
                if (!pgd_present(pgd[i]))
                        continue;

                pud = pud_offset(pgd + i, 0);
                if (i == i_max)
                        end = end_gpa;

                if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) {
                        pgd_clear(pgd + i);
                        pud_free(NULL, pud);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}
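/*
 * For context: the pgd-level walk above relies on a matching helper one
 * level down. The following is a minimal sketch of what that pud-level
 * counterpart looks like -- same "flush children, then free the table if
 * the whole range underneath was covered" pattern. This is an illustrative
 * reconstruction, not necessarily the exact source, and it assumes a
 * kvm_mips_flush_gpa_pmd() helper exists for the next level down.
 */
static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
                                   unsigned long end_gpa)
{
        pmd_t *pmd;
        unsigned long end = ~0ul;
        int i_min = pud_index(start_gpa);
        int i_max = pud_index(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
                if (!pud_present(pud[i]))
                        continue;

                pmd = pmd_offset(pud + i, 0);
                if (i == i_max)
                        end = end_gpa;

                /* Free the pmd table only if its entire range was flushed. */
                if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) {
                        pud_clear(pud + i);
                        pmd_free(NULL, pmd);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}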
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pgtable_t pte;

        if (!pgd_base)
                return;

        pgd = pgd_base + pgd_index(0);
        if (pgd_none_or_clear_bad(pgd))
                goto no_pgd;

        pud = pud_offset(pgd, 0);
        if (pud_none_or_clear_bad(pud))
                goto no_pud;

        pmd = pmd_offset(pud, 0);
        if (pmd_none_or_clear_bad(pmd))
                goto no_pmd;

        pte = pmd_pgtable(*pmd);
        pmd_clear(pmd);
        pte_free(mm, pte);
no_pmd:
        pud_clear(pud);
        pmd_free(mm, pmd);
no_pud:
        pgd_clear(pgd);
        pud_free(mm, pud);
no_pgd:
        free_pages((unsigned long) pgd_base, 2);
}
static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
        pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);

        pgd_clear(pgd);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        pud_free(NULL, pud_table);
        put_page(virt_to_page(pgd));
}
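/*
 * A minimal sketch of the analogous next-level helper, assuming the same
 * KVM/ARM stage-2 teardown pattern as clear_pgd_entry() above: clear the
 * entry, flush the TLB for that IPA, free the now-unreferenced lower-level
 * table, and drop the reference held on the table page being emptied.
 * Illustrative only.
 */
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
        pmd_t *pmd_table = pmd_offset(pud, 0);

        pud_clear(pud);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        pmd_free(NULL, pmd_table);
        put_page(virt_to_page(pud));
}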
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *new_pgd, *init_pgd;
        pud_t *new_pud, *init_pud;
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;

        new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
        if (!new_pgd)
                goto no_pgd;

        memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

        /*
         * Copy over the kernel and IO PGD entries
         */
        init_pgd = pgd_offset_k(0);
        memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

        clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

        if (!vectors_high()) {
                /*
                 * On ARM, first page must always be allocated since it
                 * contains the machine vectors.
                 */
                new_pud = pud_alloc(mm, new_pgd, 0);
                if (!new_pud)
                        goto no_pud;

                new_pmd = pmd_alloc(mm, new_pud, 0);
                if (!new_pmd)
                        goto no_pmd;

                new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
                if (!new_pte)
                        goto no_pte;

                init_pud = pud_offset(init_pgd, 0);
                init_pmd = pmd_offset(init_pud, 0);
                init_pte = pte_offset_map(init_pmd, 0);
                set_pte_ext(new_pte, *init_pte, 0);
                pte_unmap(init_pte);
                pte_unmap(new_pte);
        }

        return new_pgd;

no_pte:
        pmd_free(mm, new_pmd);
no_pmd:
        pud_free(mm, new_pud);
no_pud:
        free_pages((unsigned long)new_pgd, 2);
no_pgd:
        return NULL;
}
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
                         unsigned long kernel)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset(mm, proc);
        pud = pud_alloc(mm, pgd, proc);
        if (!pud)
                goto out;

        pmd = pmd_alloc(mm, pud, proc);
        if (!pmd)
                goto out_pmd;

        pte = pte_alloc_map(mm, pmd, proc);
        if (!pte)
                goto out_pte;

        /* There's an interaction between the skas0 stub pages, stack
         * randomization, and the BUG at the end of exit_mmap.  exit_mmap
         * checks that the number of page tables freed is the same as had
         * been allocated.  If the stack is on the last page table page,
         * then the stack pte page will be freed, and if not, it won't.  To
         * avoid having to know where the stack is, or if the process mapped
         * something at the top of its address space for some other reason,
         * we set TASK_SIZE to end at the start of the last page table.
         * This keeps exit_mmap off the last page, but introduces a leak
         * of that page.  So, we hang onto it here and free it in
         * destroy_context_skas.
         */
        mm->context.skas.last_page_table = pmd_page_kernel(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
        mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif

        *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
        *pte = pte_mkexec(*pte);
        *pte = pte_wrprotect(*pte);
        return(0);

        /* Unwind in reverse order of allocation: a failed pte_alloc_map()
         * must free both the pmd and the pud, a failed pmd_alloc() only
         * the pud.
         */
out_pte:
        pmd_free(pmd);
out_pmd:
        pud_free(pud);
out:
        return(-ENOMEM);
}
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
        /*
         * After the lock is released, the original page table is restored.
         */
        int pgd_idx, i;
        int nr_pgds;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;

        if (!efi_enabled(EFI_OLD_MEMMAP)) {
                write_cr3((unsigned long)save_pgd);
                __flush_tlb_all();
                return;
        }

        nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

        for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
                pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
                set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);

                if (!(pgd_val(*pgd) & _PAGE_PRESENT))
                        continue;

                for (i = 0; i < PTRS_PER_P4D; i++) {
                        p4d = p4d_offset(pgd,
                                         pgd_idx * PGDIR_SIZE + i * P4D_SIZE);

                        if (!(p4d_val(*p4d) & _PAGE_PRESENT))
                                continue;

                        pud = (pud_t *)p4d_page_vaddr(*p4d);
                        pud_free(&init_mm, pud);
                }

                p4d = (p4d_t *)pgd_page_vaddr(*pgd);
                p4d_free(&init_mm, p4d);
        }

        kfree(save_pgd);

        __flush_tlb_all();
        early_code_mapping_set_exec(0);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
        unsigned long flags;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pgtable_t pte;

        if (!pgd_base)
                return;

        pgd = pgd_base + pgd_index(0);
        if (pgd_none_or_clear_bad(pgd))
                goto no_pgd;

        pud = pud_offset(pgd + pgd_index(fcse_va_to_mva(mm, 0)), 0);
        if (pud_none_or_clear_bad(pud))
                goto no_pud;

        pmd = pmd_offset(pud, 0);
        if (pmd_none_or_clear_bad(pmd))
                goto no_pmd;

        pte = pmd_pgtable(*pmd);
        pmd_clear(pmd);
        pte_free(mm, pte);
no_pmd:
        pud_clear(pud);
        pmd_free(mm, pmd);
no_pud:
        pgd_clear(pgd);
        pud_free(mm, pud);
no_pgd:
        pgd_list_lock(flags);
        pgd_list_del(pgd);
        pgd_list_unlock(flags);
        free_pages((unsigned long) pgd_base, 2);
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *new_pgd, *init_pgd;
        pud_t *new_pud, *init_pud;
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;

        new_pgd = __pgd_alloc();
        if (!new_pgd)
                goto no_pgd;

        memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

        /*
         * Copy over the kernel and IO PGD entries
         */
        init_pgd = pgd_offset_k(0);
        memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

        clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
        /*
         * Allocate PMD table for modules and pkmap mappings.
         */
        new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
                            MODULES_VADDR);
        if (!new_pud)
                goto no_pud;

        new_pmd = pmd_alloc(mm, new_pud, 0);
        if (!new_pmd)
                goto no_pmd;
#endif

        if (!vectors_high()) {
                /*
                 * On ARM, first page must always be allocated since it
                 * contains the machine vectors. The vectors are always high
                 * with LPAE.
                 */
                new_pud = pud_alloc(mm, new_pgd, 0);
                if (!new_pud)
                        goto no_pud;

                new_pmd = pmd_alloc(mm, new_pud, 0);
                if (!new_pmd)
                        goto no_pmd;

                new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
                if (!new_pte)
                        goto no_pte;

                init_pud = pud_offset(init_pgd, 0);
                init_pmd = pmd_offset(init_pud, 0);
                init_pte = pte_offset_map(init_pmd, 0);
                set_pte_ext(new_pte, *init_pte, 0);
                pte_unmap(init_pte);
                pte_unmap(new_pte);
        }

        return new_pgd;

no_pte:
        pmd_free(mm, new_pmd);
no_pmd:
        pud_free(mm, new_pud);
no_pud:
        __pgd_free(new_pgd);
no_pgd:
        return NULL;
}
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *new_pgd, *init_pgd;
        pud_t *new_pud, *init_pud;
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;

        new_pgd = __pgd_alloc();
        if (!new_pgd)
                goto no_pgd;

        memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

        init_pgd = pgd_offset_k(0);
        memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

        clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
        new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
                            MODULES_VADDR);
        if (!new_pud)
                goto no_pud;

        new_pmd = pmd_alloc(mm, new_pud, 0);
        if (!new_pmd)
                goto no_pmd;
#endif

        if (!vectors_high()) {
                new_pud = pud_alloc(mm, new_pgd, 0);
                if (!new_pud)
                        goto no_pud;

                new_pmd = pmd_alloc(mm, new_pud, 0);
                if (!new_pmd)
                        goto no_pmd;

                new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
                if (!new_pte)
                        goto no_pte;

                init_pud = pud_offset(init_pgd, 0);
                init_pmd = pmd_offset(init_pud, 0);
                init_pte = pte_offset_map(init_pmd, 0);
                set_pte_ext(new_pte, *init_pte, 0);
                pte_unmap(init_pte);
                pte_unmap(new_pte);
        }

        return new_pgd;

no_pte:
        pmd_free(mm, new_pmd);
no_pmd:
        pud_free(mm, new_pud);
no_pud:
        __pgd_free(new_pgd);
no_pgd:
        return NULL;
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *new_pgd, *init_pgd;
        pud_t *new_pud, *init_pud;
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;

#if defined(CONFIG_SYNO_ARMADA_ARCH)
        new_pgd = __pgd_alloc();
#elif defined(CONFIG_SYNO_COMCERTO)
        new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, get_order(16384));
#else
        new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
#endif
        if (!new_pgd)
                goto no_pgd;

        memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

        /*
         * Copy over the kernel and IO PGD entries
         */
        init_pgd = pgd_offset_k(0);
        memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

        clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#if defined(CONFIG_SYNO_ARMADA_ARCH) && defined(CONFIG_ARM_LPAE)
        /*
         * Allocate PMD table for modules and pkmap mappings.
         */
        new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
                            MODULES_VADDR);
        if (!new_pud)
                goto no_pud;

        new_pmd = pmd_alloc(mm, new_pud, 0);
        if (!new_pmd)
                goto no_pmd;
#endif

        if (!vectors_high()) {
                /*
                 * On ARM, first page must always be allocated since it
                 * contains the machine vectors. With CONFIG_SYNO_ARMADA_ARCH
                 * the vectors are always high when LPAE is enabled.
                 */
                new_pud = pud_alloc(mm, new_pgd, 0);
                if (!new_pud)
                        goto no_pud;

                new_pmd = pmd_alloc(mm, new_pud, 0);
                if (!new_pmd)
                        goto no_pmd;

                new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
                if (!new_pte)
                        goto no_pte;

                init_pud = pud_offset(init_pgd, 0);
                init_pmd = pmd_offset(init_pud, 0);
                init_pte = pte_offset_map(init_pmd, 0);
                set_pte_ext(new_pte, *init_pte, 0);
                pte_unmap(init_pte);
                pte_unmap(new_pte);
        }

        return new_pgd;

no_pte:
        pmd_free(mm, new_pmd);
no_pmd:
        pud_free(mm, new_pud);
no_pud:
#if defined(CONFIG_SYNO_ARMADA_ARCH)
        __pgd_free(new_pgd);
#elif defined(CONFIG_SYNO_COMCERTO)
        free_pages((unsigned long)new_pgd, get_order(16384));
#else
        free_pages((unsigned long)new_pgd, 2);
#endif
no_pgd:
        return NULL;
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *new_pgd, *init_pgd;
        pud_t *new_pud, *init_pud;
        pmd_t *new_pmd, *init_pmd;
        pte_t *new_pte, *init_pte;

        new_pgd = __pgd_alloc();
        if (!new_pgd)
                goto no_pgd;

        memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

        /*
         * Copy over the kernel and IO PGD entries
         */
        init_pgd = pgd_offset_k(0);
        memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

        clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
        /*
         * Allocate PMD table for modules and pkmap mappings.
         */
        new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
                            MODULES_VADDR);
        if (!new_pud)
                goto no_pud;

        new_pmd = pmd_alloc(mm, new_pud, 0);
        if (!new_pmd)
                goto no_pmd;
#endif

        if (!vectors_high()) {
                /*
                 * On ARM, first page must always be allocated since it
                 * contains the machine vectors. The vectors are always high
                 * with LPAE.
                 */
                new_pud = pud_alloc(mm, new_pgd, 0);
                if (!new_pud)
                        goto no_pud;

                new_pmd = pmd_alloc(mm, new_pud, 0);
                if (!new_pmd)
                        goto no_pmd;

                new_pte = pte_alloc_map(mm, new_pmd, 0);
                if (!new_pte)
                        goto no_pte;

#ifndef CONFIG_ARM_LPAE
                /*
                 * Modify the PTE pointer to have the correct domain. This
                 * needs to be the vectors domain to avoid the low vectors
                 * being unmapped.
                 */
                pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
                pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif

                init_pud = pud_offset(init_pgd, 0);
                init_pmd = pmd_offset(init_pud, 0);
                init_pte = pte_offset_map(init_pmd, 0);
                set_pte_ext(new_pte + 0, init_pte[0], 0);
                set_pte_ext(new_pte + 1, init_pte[1], 0);
                pte_unmap(init_pte);
                pte_unmap(new_pte);
        }

        return new_pgd;

no_pte:
        pmd_free(mm, new_pmd);
        mm_dec_nr_pmds(mm);
no_pmd:
        pud_free(mm, new_pud);
no_pud:
        __pgd_free(new_pgd);
no_pgd:
        return NULL;
}
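/*
 * For reference, pgd_alloc()/pgd_free() are consumed by the core mm code
 * when an address space is created or torn down. Below is a simplified
 * sketch of those callers, roughly as they appear in kernel/fork.c on
 * architectures that do not select __PAGETABLE_PGD_FOLDED; it is shown
 * only to illustrate the contract: pgd_alloc() may fail, and a successful
 * allocation must later be paired with pgd_free() on the same mm.
 */
static inline int mm_alloc_pgd(struct mm_struct *mm)
{
        /* Allocate the top-level table and install it in the mm. */
        mm->pgd = pgd_alloc(mm);
        if (unlikely(!mm->pgd))
                return -ENOMEM;
        return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
        /* Release the top-level table when the mm is destroyed. */
        pgd_free(mm, mm->pgd);
}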