void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
	unsigned long flags;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd + pgd_index(fcse_va_to_mva(mm, 0)), 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	pmd_free(mm, pmd);
free:
	pgd_list_lock(flags);
	pgd_list_del(pgd);
	pgd_list_unlock(flags);
	free_pages((unsigned long) pgd, 2);
}
void pgd_free(pgd_t *pgd)
{
	int i;

	pgd_test_and_unpin(pgd);

	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1) {
		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
			/* the entry stores the pmd address with the low
			 * (present) bit set; subtract 1 to strip it */
			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
			kmem_cache_free(pmd_cache, pmd);
		}

		if (!HAVE_SHARED_KERNEL_PMD) {
			unsigned long flags;

			spin_lock_irqsave(&pgd_lock, flags);
			pgd_list_del(pgd);
			spin_unlock_irqrestore(&pgd_lock, flags);

			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
				make_lowmem_page_writable(
					pmd, XENFEAT_writable_page_tables);
				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
				kmem_cache_free(pmd_cache, pmd);
			}
		}
	}

	/* in the non-PAE case, free_pgtables() clears user pgd entries */
	kmem_cache_free(pgd_cache, pgd);
}
/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
void pgd_dtor(void *pgd)
{
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	if (PTRS_PER_PMD > 1) {
		/* PAE: undo the contiguous-region setup done by the
		 * constructor */
		if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
			xen_destroy_contiguous_region((unsigned long)pgd, 0);
	} else {
		spin_lock_irqsave(&pgd_lock, flags);
		pgd_list_del(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
		pgd_test_and_unpin(pgd);
	}
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	unsigned long flags;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd + pgd_index(fcse_va_to_mva(mm, 0)), 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
	pgd_list_lock(flags);
	pgd_list_del(pgd);
	pgd_list_unlock(flags);
	free_pages((unsigned long) pgd_base, 2);
}