/*
 * free_pgd_slow - release a first-level page-table directory.
 *
 * Tears down the single user pmd/pte pair hanging off entry 0 (if any),
 * then hands the 4-page pgd allocation back to the page allocator.
 */
void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (!pmd_none(*pmd)) {
		if (pmd_bad(*pmd)) {
			/* Corrupt entry: report it, drop it, skip the free path. */
			pmd_ERROR(*pmd);
			pmd_clear(pmd);
		} else {
			/* Detach the pte page before freeing pte and pmd. */
			pte = pmd_pgtable(*pmd);
			pmd_clear(pmd);
			pte_free(mm, pte);
			pmd_free(mm, pmd);
		}
	}
	free_pages((unsigned long)pgd, 2);
}
/*
 * free_pgd_slow - release a first-level directory (FCSE-aware variant).
 *
 * Frees the user pmd/pte pair at the PID-relocated slot for virtual
 * address 0, unhooks the pgd from the global pgd list, then returns
 * the 4-page pgd allocation to the page allocator.
 */
void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
	unsigned long flags;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	/* Under FCSE, user va 0 maps to a PID-relocated first-level slot. */
	pmd = pmd_off(pgd + pgd_index(fcse_va_to_mva(mm, 0)), 0);
	if (!pmd_none(*pmd)) {
		if (pmd_bad(*pmd)) {
			pmd_ERROR(*pmd);
			pmd_clear(pmd);
		} else {
			pte = pmd_pgtable(*pmd);
			pmd_clear(pmd);
			pte_free(mm, pte);
			pmd_free(mm, pmd);
		}
	}

	/* Drop the pgd from the global list before releasing its pages. */
	pgd_list_lock(flags);
	pgd_list_del(pgd);
	pgd_list_unlock(flags);
	free_pages((unsigned long)pgd, 2);
}
/*
 * free_pgd_slow - release a first-level directory (legacy, no mm argument).
 *
 * On this 2-level configuration the pgd entry itself is treated as the
 * pmd; frees the attached pte table (if present and sane), then the
 * 4-page pgd allocation.
 */
void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	pte_t *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = (pmd_t *)pgd;
	if (!pmd_none(*pmd)) {
		if (pmd_bad(*pmd)) {
			/* Report and clear a corrupt entry; nothing to free. */
			pmd_ERROR(*pmd);
			pmd_clear(pmd);
		} else {
			pte = pte_offset(pmd, 0);
			pmd_clear(pmd);
			pte_free(pte);
			pmd_free(pmd);
		}
	}
	free_pages((unsigned long)pgd, 2);
}
/*
 * pgd_free - tear down and release a process first-level directory.
 *
 * Walks the entry for user virtual address 0 down through pud/pmd,
 * freeing each level that is present; the label chain (no_pmd ->
 * no_pud -> no_pgd) deliberately falls through so that each lower
 * level is cleared/freed once the walk has gone past it.
 *
 * The final release is platform-dependent: SYNO_ARMADA uses
 * __pgd_free() (with an extra LPAE sweep of non-swapper entries),
 * SYNO_COMCERTO frees a 16 KiB allocation, everything else frees the
 * usual order-2 (4-page) pgd.
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	/* Descend the first user slot; bail out at the first absent level. */
	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	/* Detach the pte page before clearing the pmd that points at it. */
	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
#if defined(CONFIG_SYNO_ARMADA_ARCH)
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
		/* Entries shared with swapper_pg_dir are not ours to free. */
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		pud = pud_offset(pgd, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		pgd_clear(pgd);
		pud_free(mm, pud);
	}
#endif
	__pgd_free(pgd_base);
#elif defined(CONFIG_SYNO_COMCERTO)
	free_pages((unsigned long) pgd_base, get_order(16384));
#else
	free_pages((unsigned long) pgd_base, 2);
#endif
}
/*
 * pte_free_now - emergency-path pte page release.
 *
 * This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).  Bumps the forced-free
 * counter, synchronises with all other CPUs via an IPI, then frees
 * the pte page immediately instead of batching it.
 */
void pte_free_now(struct page *ptepage)
{
	pte_freelist_forced_free++;

	/* Wait for every CPU before the page can safely be reused. */
	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pte_free(ptepage);
}
/*
 * pte_free_rcu_callback - RCU-deferred release of a batch of pte pages.
 *
 * Frees every page recorded in the batch, then the batch page itself.
 */
static void pte_free_rcu_callback(void *arg)
{
	struct pte_freelist_batch *batch = arg;
	unsigned int i = 0;

	while (i < batch->index)
		pte_free(batch->pages[i++]);

	/* The batch descriptor occupies a single page of its own. */
	free_page((unsigned long)batch);
}
/*
 * pgd_free - tear down and release a process first-level directory.
 *
 * Walks the entry for user virtual address 0 down through pud/pmd,
 * freeing each level that is present and keeping the mm page-table
 * counters (nr_ptes/nr_pmds) in step.  The label chain (no_pmd ->
 * no_pud -> no_pgd) deliberately falls through so each lower level is
 * cleared/freed once the walk has passed it.  Under CONFIG_ARM_LPAE a
 * final sweep also frees any modules/pkmap or identity pmd tables not
 * shared with swapper_pg_dir, before __pgd_free() releases the pgd
 * allocation itself.
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	/* Descend the first user slot; bail out at the first absent level. */
	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	/* Detach the pte page before clearing the pmd that points at it. */
	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	mm_dec_nr_ptes(mm);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
		/* Entries shared with swapper_pg_dir are not ours to free. */
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		pud = pud_offset(pgd, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
		pgd_clear(pgd);
		pud_free(mm, pud);
	}
#endif
	__pgd_free(pgd_base);
}
/*
 * pte_free_rcu_callback - RCU grace-period callback freeing a pte batch.
 *
 * Recovers the batch descriptor from its embedded rcu_head, frees every
 * recorded pte page, then releases the descriptor page itself.
 */
static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i = 0;

	while (i < batch->index)
		pte_free(batch->pages[i++]);

	/* The batch descriptor occupies a single page of its own. */
	free_page((unsigned long)batch);
}
/*
 * arch_release_hugepage - free the shadow pte table of a software hugepage.
 *
 * When the machine has hardware hugepage support there is no shadow
 * table and nothing to do.  Otherwise the pte table pointer stashed in
 * page[1].index is freed and the stash cleared.
 */
void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	/* The shadow pte table pointer is parked in the tail page's index. */
	ptep = (pte_t *) page[1].index;
	if (ptep) {
		pte_free(&init_mm, ptep);
		page[1].index = 0;
	}
}
/*
 * free_one_pmd - release the pte table attached to one pmd entry.
 *
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static inline void free_one_pmd(pmd_t * dir)
{
	pte_t * pte;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		/* Report and drop a corrupt entry; nothing safe to free. */
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}

	/* Grab the pte table before clearing the entry pointing at it. */
	pte = pte_offset(dir, 0);
	pmd_clear(dir);
	pte_free(pte);
}
/*
 * free_one_pmd - release the pte table attached to one pmd entry.
 *
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static inline void free_one_pmd(pmd_t * dir)
{
	pte_t * pte;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		/* Report and drop a corrupt entry; nothing safe to free. */
		printk("free_one_pmd: bad directory entry %08lx\n", pmd_val(*dir));
		pmd_clear(dir);
		return;
	}

	/* Grab the pte table before clearing the entry pointing at it. */
	pte = pte_offset(dir, 0);
	pmd_clear(dir);
	pte_free(pte);
}
/*
 * pgd_free - tear down and release a process first-level directory
 * (FCSE-aware variant).
 *
 * Walks the first-level entry covering user virtual address 0 down
 * through pud/pmd, freeing each level that is present; the label chain
 * (no_pmd -> no_pud -> no_pgd) deliberately falls through.  Finally the
 * pgd is removed from the global pgd list and its 4 pages are freed.
 *
 * Fix: under FCSE, user va 0 lives at the PID-relocated slot
 * pgd_index(fcse_va_to_mva(mm, 0)), which is generally not index 0.
 * The original code validated (pgd_none_or_clear_bad) and finally
 * cleared pgd_base + pgd_index(0) while taking the pud from the
 * FCSE-relocated slot, so the check and pgd_clear() hit the wrong
 * entry.  Compute the FCSE slot once and use it consistently for the
 * check, the pud walk and pgd_clear(); pgd_list_del() keeps operating
 * on pgd_base (the list links the directory itself, and the original's
 * pgd_index(0) offset is zero).
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	unsigned long flags;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	/* First-level slot for user va 0, PID-relocated under FCSE. */
	pgd = pgd_base + pgd_index(fcse_va_to_mva(mm, 0));
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	/* Detach the pte page before clearing the pmd that points at it. */
	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
	/* Unhook from the global pgd list before releasing the pages. */
	pgd_list_lock(flags);
	pgd_list_del(pgd_base);
	pgd_list_unlock(flags);
	free_pages((unsigned long) pgd_base, 2);
}
/*
 * pgtable_free_now - emergency-path page-table page release.
 *
 * This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).  Synchronises with all
 * other CPUs via an IPI, then frees the page immediately instead of
 * batching it.
 */
static void pgtable_free_now(struct page *pte)
{
	/* Wait for every CPU before the page can safely be reused. */
	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pte_free(pte);
}