/*
 * Free the first-level page table.  With FCSE, user virtual address 0 is
 * first translated to its modified virtual address before the first-level
 * entry is looked up, and the pgd is taken off the global pgd list before
 * its pages are released.
 */
void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
	unsigned long flags;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd + pgd_index(fcse_va_to_mva(mm, 0)), 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	pmd_free(mm, pmd);
free:
	pgd_list_lock(flags);
	pgd_list_del(pgd);
	pgd_list_unlock(flags);
	free_pages((unsigned long) pgd, 2);
}
void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	pmd_free(mm, pmd);
free:
	free_pages((unsigned long) pgd, 2);
}
/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	// if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
	//	base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}
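As a rough illustration of the descriptor arithmetic above, the standalone sketch below recomputes the section values that the loop writes into pmd[0] and pmd[1]. It is not kernel code: PGDIR_SHIFT is assumed to be 21 as on the classic ARM 2-level layout, and the PMD_* flag values are illustrative stand-ins included only so the sketch compiles on its own.

#include <stdio.h>

/*
 * Assumed values for the classic ARM short-descriptor layout; in the
 * kernel these come from pgtable-hwdef.h and may differ.
 */
#define PGDIR_SHIFT		21		/* each pgd entry spans two 1 MiB sections */
#define PMD_TYPE_SECT		(2 << 0)
#define PMD_SECT_AP_WRITE	(1 << 10)
#define PMD_SECT_AP_READ	(1 << 11)

int main(void)
{
	unsigned long base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
				    PMD_TYPE_SECT;
	int i;

	/*
	 * Same composition as the loop in setup_mm_for_reboot(): the section
	 * base address is the entry index shifted up, OR'ed with the flags.
	 */
	for (i = 0; i < 4; i++) {
		unsigned long pmdval = ((unsigned long)i << PGDIR_SHIFT) | base_pmdval;

		printf("pgd[%d]: pmd[0]=0x%08lx pmd[1]=0x%08lx\n",
		       i, pmdval, pmdval + (1UL << (PGDIR_SHIFT - 1)));
	}
	return 0;
}

For each entry the two halves differ by 1 << (PGDIR_SHIFT - 1), i.e. one 1 MiB section, so consecutive pgd entries tile the address space with flat, section-sized 1:1 mappings.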
static inline pmd_t *pmd_off_k(unsigned long virt)
{
	pgd_t *pgd;

	pgd = pgd_offset_k(virt);
	return pmd_off(pgd, virt);
}