int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page))) {
			swapcache_free(entry, NULL);
			return 0;
		}

	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {
		SetPageDirty(page);
		return 1;
	} else {
		swapcache_free(entry, NULL);
		return 0;
	}
}
int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	loff_t holelen;

	VM_BUG_ON_PAGE(PageTail(page), page);

	holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				    (loff_t)page->index << PAGE_SHIFT,
				    holelen, 0);
	}
	return truncate_complete_page(mapping, page);
}
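/*
 * Illustrative sketch (not from the original source): a minimal caller in the
 * style of truncate_inode_pages_range(), showing the calling convention for
 * truncate_inode_page() -- the page belongs to @mapping, is locked by the
 * caller, and is not a THP tail page.  The wrapper name below is hypothetical;
 * the locking and writeback helpers are standard kernel APIs.
 */
static void truncate_one_page(struct address_space *mapping, struct page *page)
{
	if (!trylock_page(page))
		return;

	/* Truncation must not race with in-flight writeback of this page. */
	wait_on_page_writeback(page);

	truncate_inode_page(mapping, page);
	unlock_page(page);
}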
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

#ifdef CONFIG_HSWAP
	if (!current_is_kswapd())
		entry = get_lowest_prio_swap_page();
	else
#endif
		entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page))) {
			swapcache_free(entry, NULL);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list to place split THP tail pages on, passed to
 *	  split_huge_page_to_list()
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (mem_cgroup_try_charge_swap(page, entry)) {
		swapcache_free(entry);
		return 0;
	}

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}
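/*
 * Illustrative sketch (not from the original source): how a reclaim-style
 * caller, in the spirit of shrink_page_list(), would use the add_to_swap()
 * variant above.  The page must be locked and anonymous; on success it sits
 * in the swap cache with a swap entry assigned and can then be unmapped and
 * written out.  The wrapper name and return convention are hypothetical.
 */
static bool start_swap_out(struct page *page, struct list_head *page_list)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!PageAnon(page) || PageSwapCache(page))
		return false;	/* not anonymous, or already has swap space */

	/*
	 * add_to_swap() may split a THP, queueing the tail pages onto
	 * @page_list; on failure the page stays outside the swap cache
	 * and cannot be reclaimed right now.
	 */
	if (!add_to_swap(page, page_list))
		return false;

	/*
	 * The page is now PageSwapCache().  The caller proceeds to unmap it
	 * and issue swap writeback, marking it dirty first if this
	 * add_to_swap() variant no longer does so itself.
	 */
	return true;
}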
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;

			WARN_ON(page_to_index(page) != index);

			/* Middle of THP: skip */
			if (PageTransTail(page)) {
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
				/* 'end' is in the middle of THP */
				if (index == round_down(end, HPAGE_PMD_NR))
					continue;
			}

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
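/*
 * Illustrative sketch (not from the original source): a caller in the style
 * of POSIX_FADV_DONTNEED handling, dropping clean unmapped page cache over a
 * byte range.  Byte offsets are converted to page indices; note that the
 * 'end' index passed to invalidate_mapping_pages() is inclusive.  The wrapper
 * name is hypothetical; the rest are standard kernel APIs.
 */
static unsigned long drop_clean_cache_range(struct address_space *mapping,
					    loff_t offset, loff_t len)
{
	pgoff_t start = (offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (offset + len - 1) >> PAGE_SHIFT;

	if (end < start)
		return 0;

	/*
	 * Dirty, locked, under-writeback or mapped pages are skipped;
	 * the return value counts the pages actually invalidated.
	 */
	return invalidate_mapping_pages(mapping, start, end);
}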
/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page
 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address,
					    PAGE_SIZE << compound_order(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			}
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else if (!pmd_present(pmde)) {
		return false;
	}
	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}
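/*
 * Illustrative sketch (not from the original source): the calling pattern
 * described in the kerneldoc above, in the style of an rmap helper.  The walk
 * is run in a loop so that every PTE of a PTE-mapped THP is visited;
 * page_vma_mapped_walk_done() would only be needed when bailing out early.
 * The function name and the per-mapping action are hypothetical.
 */
static int count_page_mappings_in_vma(struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		/* .flags = PVMW_MIGRATION would look for migration entries */
	};
	int nr = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * Here pvmw.pte (or pvmw.pmd for a PMD mapping) points to the
		 * relevant entry and pvmw.ptl is held; a real caller would
		 * inspect or modify the entry before the next iteration.
		 */
		nr++;
	}
	/* Once the walk returns false, its locks are already dropped. */
	return nr;
}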