/*
 * Install a new large-page PMD value and keep the L4 microkernel's mapping
 * database in sync with the Linux page tables.
 *
 * Compares @old against @pmdval and decides whether the host mapping must be
 * flushed (unmapped) entirely, downgraded to read-only, or left alone:
 *
 *   - physical frame changed, or the accessed bit was cleared
 *     -> flush all rights (RWX)
 *   - write permission removed, or dirty/soft-dirty cleared
 *     -> revoke only the write right (remap read-only)
 *   - otherwise nothing relevant changed -> no flush
 *
 * @mm:     address space the PMD belongs to
 * @addr:   virtual address covered by the PMD
 * @old:    previous PMD value (must be a large/huge mapping, see BUG_ON)
 * @pmdval: new PMD value being installed
 *
 * Returns pmd_val(@pmdval), i.e. the raw value of the new entry.
 */
unsigned long l4x_set_pmd(struct mm_struct *mm, unsigned long addr,
			  pmd_t old, pmd_t pmdval)
{
	/*
	 * Check if any invalidation is necessary
	 *
	 * Invalidation (flush) necessary if:
	 *    old page was present
	 *        new page is not present OR
	 *        new page has another physical address OR
	 *        new page has another protection OR
	 *        new page has other access attributes
	 */

	/* old was present && new not -> flush */
	int flush_rights = L4_FPAGE_RWX;

	/* This path only handles large (PMD-sized) mappings. */
	BUG_ON(!pmd_large(old));

	if (pmd_present(pmdval)) {
		/* new page is present,
		 * now we have to find out what has changed */
		if (((pmd_val(old) ^ pmd_val(pmdval))
		     & PMD_PAGE_MASK & PHYSICAL_PAGE_MASK)
		    || (pmd_young(old) && !pmd_young(pmdval))) {
			/* physical page frame changed
			 * || access attribute changed -> flush */
			/* flush is the default */
		} else if ((pmd_write(old) && !pmd_write(pmdval))
			   || (pmd_flags(old) & ~pmd_flags(pmdval)
			       & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY))) {
			/* Protection changed from r/w to ro
			 * or page now clean -> remap */
			flush_rights = L4_FPAGE_W;
		} else {
			/* nothing changed, simply return */
			return pmd_val(pmdval);
		}
	}

	/* Ok, now actually flush or remap the page */
	l4x_flush_page(mm,
		       pmd_val(old) & PMD_PAGE_MASK & PHYSICAL_PAGE_MASK,
		       addr, PMD_SHIFT, flush_rights, _RET_IP_);

	return pmd_val(pmdval);
}
/*
 * Account a transparent-huge-page PMD entry into the smaps statistics.
 *
 * Looks up the page backing the huge PMD; on success, the whole
 * HPAGE_PMD_SIZE region is counted as anonymous THP and charged via
 * smaps_account() together with its young/dirty state.  The huge zero
 * page contributes nothing: FOLL_DUMP makes the lookup fail for it.
 */
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct page *thp_page;

	/* FOLL_DUMP will return -EFAULT on huge zero page */
	thp_page = follow_trans_huge_pmd(walk->vma, addr, pmd, FOLL_DUMP);
	if (IS_ERR_OR_NULL(thp_page))
		return;

	mss->anonymous_thp += HPAGE_PMD_SIZE;
	smaps_account(mss, thp_page, HPAGE_PMD_SIZE,
		      pmd_young(*pmd), pmd_dirty(*pmd));
}