/*
 * Called by TLB shootdown: release the page a stale pte referenced.
 */
void __free_pte(pte_t pte)
{
	struct page *pg = pte_page(pte);

	/* Invalid and reserved pages are never ours to free. */
	if (VALID_PAGE(pg) && !PageReserved(pg)) {
		/* Propagate the hardware dirty bit before dropping the page. */
		if (pte_dirty(pte))
			set_page_dirty(pg);
		free_page_and_swap_cache(pg);
	}
}
/*
 * Drop whatever a no-longer-needed pte referenced: a resident page
 * (freed along with any swap-cache entry) or, if swapped out, the
 * swap entry itself.
 */
static inline void forget_pte(pte_t page)
{
	struct page *pg;

	if (pte_none(page))
		return;

	if (!pte_present(page)) {
		/* Not resident: the pte encodes a swap entry. */
		swap_free(pte_to_swp_entry(page));
		return;
	}

	pg = pte_page(page);
	if (VALID_PAGE(pg) && !PageReserved(pg)) {
		/*
		 * free_page() used to be able to clear swap cache
		 * entries. We may now have to do it manually.
		 */
		free_page_and_swap_cache(pg);
	}
}
/*
 * Drop whatever a no-longer-needed pte referenced, using the older
 * MAP_NR()/mem_map page accounting API (pte_page() yields an address
 * here, not a struct page *).
 *
 * NOTE(review): this defines the same name as the forget_pte() variant
 * that uses VALID_PAGE()/struct page * — presumably the two belong to
 * different kernel versions or config branches and only one is compiled
 * in; verify against the surrounding #ifdefs.
 */
static inline void forget_pte(pte_t page)
{
	unsigned long addr;

	if (pte_none(page))
		return;

	if (!pte_present(page)) {
		/* Not resident: the pte value is the swap entry. */
		swap_free(pte_val(page));
		return;
	}

	addr = pte_page(page);
	if (MAP_NR(addr) < max_mapnr && !PageReserved(mem_map + MAP_NR(addr))) {
		/*
		 * free_page() used to be able to clear swap cache
		 * entries. We may now have to do it manually.
		 */
		free_page_and_swap_cache(addr);
	}
}
/*
 * Release whatever a spent pte referenced.
 *
 * Returns 1 when a present, freeable page was released (so the caller
 * can adjust rss), 0 otherwise (reserved/invalid page, or a swap entry
 * that was simply dropped).
 */
static inline int free_pte(pte_t pte)
{
	struct page *pg;

	if (!pte_present(pte)) {
		/* Swapped out: just release the swap entry's reference. */
		swap_free(pte_to_swp_entry(pte));
		return 0;
	}

	pg = pte_page(pte);
	if (!VALID_PAGE(pg) || PageReserved(pg))
		return 0;

	/*
	 * free_page() used to be able to clear swap cache
	 * entries. We may now have to do it manually.
	 */
	if (pte_dirty(pte) && pg->mapping)
		set_page_dirty(pg);
	free_page_and_swap_cache(pg);
	return 1;
}