/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or the PTE
 * is marked as non-present. A non-present PTE on a page with non-zero refcount
 * and zero mapcount is normal for a batched TLB flush operation. Zero refcount
 * means that the page was freed prematurely. Non-zero mapcount is unusual,
 * but does not necessarily mean an error, so it is marked as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
				dtlb ? 'D' : 'I', w, e, vpn,
				kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	if (tlb_asid == mm_asid) {
		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
			read_itlb_translation(tlbidx);
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
				pr_err("page refcount: %d, mapcount: %d\n",
						page_count(p),
						page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapcount(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				/* present PTE with mismatched PA is an error */
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}
static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = compound_trans_head(page);

		if (likely(page != page_head &&
			   get_page_unless_zero(page_head))) {
			unsigned long flags;
			/*
			 * page_head wasn't a dangling pointer but it
			 * may not be a head page anymore by the time
			 * we obtain the lock. That is ok as long as it
			 * can't be freed from under us.
			 */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount run before us */
				compound_unlock_irqrestore(page_head, flags);
				VM_BUG_ON(PageHead(page_head));
				if (put_page_testzero(page_head))
					__put_single_page(page_head);
out_put_single:
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on
			 * the compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(page_mapcount(page) <= 0);
			atomic_dec(&page->_mapcount);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			VM_BUG_ON(atomic_read(&page->_count) != 0);
			compound_unlock_irqrestore(page_head, flags);

			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}
/* our own functions */
static void api_print_addressinfo(void *logical_adr)
{
	struct page *page = virt_to_page(logical_adr);

	if (page == NULL) {
		PR_INFO("unable to translate address %p to page", logical_adr);
		return;
	}

	PR_INFO("address %p, page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
		logical_adr, page,
		(int)(2 * sizeof(unsigned long)), page->flags,
		page->mapping, page_mapcount(page), page_count(page));

	PR_INFO("PG_lru is %lu", page->flags & (1 << PG_lru));
	PR_INFO("PG_private is %lu", page->flags & (1 << PG_private));
	PR_INFO("PG_locked is %lu", page->flags & (1 << PG_locked));
	/* PG_buddy is missing in newer kernels, so it is commented out. */
	/* PR_INFO("PG_buddy is %lu", page->flags & (1 << PG_buddy)); */
	PR_INFO("PG_writeback is %lu", page->flags & (1 << PG_writeback));
	PR_INFO("PG_slab is %lu", page->flags & (1 << PG_slab));
	PR_INFO("PG_swapcache is %lu", page->flags & (1 << PG_swapcache));
	PR_INFO("PG_active is %lu", page->flags & (1 << PG_active));
	PR_INFO("PG_reserved is %lu", page->flags & (1 << PG_reserved));
}
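A minimal usage sketch, assuming the snippet lives in a module whose PR_INFO macro wraps pr_info() as above; the demo init function and its name are hypothetical:

/* Hypothetical demo caller: allocate one fresh page and dump its state.
 * A page straight from the allocator should show mapcount 0 and count 1,
 * with none of the LRU/slab/writeback flags set. */
static int __init addrinfo_demo_init(void)
{
	unsigned long buf = __get_free_page(GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	api_print_addressinfo((void *)buf);
	free_page(buf);
	return 0;
}
module_init(addrinfo_demo_init);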
static void smaps_account(struct mem_size_stats *mss, struct page *page,
		unsigned long size, bool young, bool dirty)
{
	int mapcount;

	if (PageAnon(page))
		mss->anonymous += size;

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || PageReferenced(page))
		mss->referenced += size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		u64 pss_delta;

		if (dirty || PageDirty(page))
			mss->shared_dirty += size;
		else
			mss->shared_clean += size;
		pss_delta = (u64)size << PSS_SHIFT;
		do_div(pss_delta, mapcount);
		mss->pss += pss_delta;
	} else {
		if (dirty || PageDirty(page))
			mss->private_dirty += size;
		else
			mss->private_clean += size;
		mss->pss += (u64)size << PSS_SHIFT;
	}
}
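The PSS fixed-point arithmetic is easiest to check with concrete numbers. A small user-space sketch, assuming PSS_SHIFT is 12 as defined in fs/proc/task_mmu.c: a 4 KiB page with mapcount 3 contributes roughly 1365 bytes to each mapper's PSS.

#include <stdio.h>
#include <stdint.h>

#define PSS_SHIFT 12	/* assumed to match fs/proc/task_mmu.c */

int main(void)
{
	uint64_t size = 4096;	/* one 4 KiB page */
	int mapcount = 3;	/* shared by three processes */

	/* Scale up before dividing so the sub-byte remainder accumulates
	 * across pages instead of being truncated per page. */
	uint64_t pss_delta = (size << PSS_SHIFT) / mapcount;

	printf("pss_delta = %llu (= %llu bytes after >> PSS_SHIFT)\n",
	       (unsigned long long)pss_delta,
	       (unsigned long long)(pss_delta >> PSS_SHIFT));
	return 0;
}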
void __dump_page(struct page *page, const char *reason)
{
	struct address_space *mapping = page_mapping(page);
	bool page_poisoned = PagePoisoned(page);
	int mapcount;

	/*
	 * If struct page is poisoned don't access Page*() functions as that
	 * leads to recursive loop. Page*() check for poisoned pages, and calls
	 * dump_page() when detected.
	 */
	if (page_poisoned) {
		pr_warn("page:%px is uninitialized and poisoned", page);
		goto hex_only;
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	mapcount = PageSlab(page) ? 0 : page_mapcount(page);

	pr_warn("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
		page, page_ref_count(page), mapcount,
		page->mapping, page_to_pgoff(page));
	if (PageCompound(page))
		pr_cont(" compound_mapcount: %d", compound_mapcount(page));
	pr_cont("\n");
	if (PageAnon(page))
		pr_warn("anon ");
	else if (PageKsm(page))
		pr_warn("ksm ");
	else if (mapping) {
		pr_warn("%ps ", mapping->a_ops);
		if (mapping->host->i_dentry.first) {
			struct dentry *dentry;
			dentry = container_of(mapping->host->i_dentry.first,
					struct dentry, d_u.d_alias);
			pr_warn("name:\"%pd\" ", dentry);
		}
	}
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);

hex_only:
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);

	if (reason)
		pr_warn("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->mem_cgroup)
		pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}
/*
 * We can use this swap cache entry directly
 * if there are no other references to it.
 */
int can_share_swap_page(struct page *page)
{
	int count;

	BUG_ON(!PageLocked(page));
	count = page_mapcount(page);
	if (count <= 1 && PageSwapCache(page))
		count += page_swapcount(page);
	return count == 1;
}
/* * Like "free_pages_check", only without calling bad_page/modifying the * page. */ static inline int free_pages_check__just_test(struct page *page) { if (unlikely(page_mapcount(page) | (page->mapping != NULL) | (atomic_read(&page->_count) != 0) | (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) { return 1; } return 0; }
/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL) |
		(atomic_read(&page->_count) != 0) |
		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
		bad_page(page);
		return 1;
	}
	return 0;
}
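Both of these checkers fold four independent conditions into a single unlikely() test using bitwise OR rather than short-circuit ||, trading a few always-cheap evaluations for a single branch. A stand-alone sketch of the same trick (function names hypothetical):

#include <stdio.h>

/* With |, all four operands are evaluated unconditionally and the
 * compiler can emit one test; with ||, each condition may get its own
 * conditional branch. Cheap, side-effect-free checks favour |. */
static int page_looks_bad(int mapcount, int has_mapping,
			  int refcount, unsigned long bad_flags)
{
	return (mapcount | has_mapping | refcount | (bad_flags != 0)) != 0;
}

int main(void)
{
	printf("%d %d\n",
	       page_looks_bad(0, 0, 0, 0),	/* 0: clean page */
	       page_looks_bad(0, 1, 0, 0));	/* 1: still has a mapping */
	return 0;
}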
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}
void dump_page_badflags(struct page *page, const char *reason,
		unsigned long badflags)
{
	pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
		  page, atomic_read(&page->_count), page_mapcount(page),
		  page->mapping, page->index);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
	dump_flags(page->flags, pageflag_names, ARRAY_SIZE(pageflag_names));
	if (reason)
		pr_alert("page dumped because: %s\n", reason);
	if (page->flags & badflags) {
		pr_alert("bad because of flags:\n");
		dump_flags(page->flags & badflags,
				pageflag_names, ARRAY_SIZE(pageflag_names));
	}
	mem_cgroup_print_bad_page(page);
}
void __dump_page(struct page *page, const char *reason)
{
	bool page_poisoned = PagePoisoned(page);
	int mapcount;

	/*
	 * If struct page is poisoned don't access Page*() functions as that
	 * leads to recursive loop. Page*() check for poisoned pages, and calls
	 * dump_page() when detected.
	 */
	if (page_poisoned) {
		pr_emerg("page:%px is uninitialized and poisoned", page);
		goto hex_only;
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	mapcount = PageSlab(page) ? 0 : page_mapcount(page);

	pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
		  page, page_ref_count(page), mapcount,
		  page->mapping, page_to_pgoff(page));
	if (PageCompound(page))
		pr_cont(" compound_mapcount: %d", compound_mapcount(page));
	pr_cont("\n");
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);

hex_only:
	print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);

	if (reason)
		pr_alert("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->mem_cgroup)
		pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}
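In-tree code reaches __dump_page() through the dump_page() wrapper in mm/debug.c. A hedged sketch of how a debugging patch might use it; the helper and its expected-value check are hypothetical:

/* Hypothetical helper in a debugging patch: dump the full page state
 * whenever a page's mapcount is not what the caller expects. */
static void check_expected_mapcount(struct page *page, int expected)
{
	if (page_mapcount(page) != expected)
		dump_page(page, "unexpected mapcount");
}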
/*
 * /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	unsigned long max_pfn_kpmsize = max_pfn * KPMSIZE;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	if (src != max_pfn_kpmsize)
		count = min_t(size_t, count, max_pfn_kpmsize - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;
		if (!ppage || PageSlab(ppage))
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}
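Because the file is laid out as one u64 per PFN, user space can seek directly to pfn * sizeof(u64). A minimal reader sketch (requires root; assumes KPMSIZE == sizeof(u64) as in fs/proc/page.c):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	unsigned long pfn = argc > 1 ? strtoul(argv[1], NULL, 0) : 0;
	uint64_t mapcount;
	FILE *f = fopen("/proc/kpagecount", "rb");

	if (!f) {
		perror("fopen");	/* usually needs root */
		return 1;
	}
	/* Each PFN occupies one 8-byte slot, so seek to pfn * 8. */
	if (fseek(f, pfn * sizeof(mapcount), SEEK_SET) ||
	    fread(&mapcount, sizeof(mapcount), 1, f) != 1) {
		perror("read");
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("pfn %lu mapcount %llu\n", pfn, (unsigned long long)mapcount);
	return 0;
}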
void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_pte(NULL, kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		*ptep = pte_set_home(pteval, home);
	}
}
static int bc_io_show(struct seq_file *f, void *v)
{
	struct list_head *lh;
	struct page_beancounter *pb;
	struct page *pg;

	lh = (struct list_head *)v;
	if (lh == &pb_io_list) {
		seq_printf(f, "Races: anon %lu missed %lu\n",
				anon_pages, not_released);

		seq_printf(f, "%-*s %-1s %-*s %-4s %*s %*s "
				"%-*s %-*s %-1s %-*s %-*s\n",
				PTR_SIZE, "pb", "",
				PTR_SIZE, "page", "flg",
				INT_SIZE, "cnt", INT_SIZE, "mcnt",
				PTR_SIZE, "pb_list",
				PTR_SIZE, "page_pb",
				"",
				PTR_SIZE, "mapping",
				INT_SIZE, "ub");
		return 0;
	}

	pb = list_entry(lh, struct page_beancounter, io_list);
	pg = pb->page;
	seq_printf(f, "%p %c %p %c%c%c%c %*d %*d %p %p %c %p %d\n",
			pb, pb->io_debug ? 'e' : 'm', pg,
			PageDirty(pg) ? 'D' : 'd',
			PageAnon(pg) ? 'A' : 'a',
			PageWriteback(pg) ? 'W' : 'w',
			PageLocked(pg) ? 'L' : 'l',
			INT_SIZE, page_count(pg),
			INT_SIZE, page_mapcount(pg),
			pb->page_pb_list, page_pbc(pg),
			iopb_to_pb(page_pbc(pg)) == pb ? ' ' : '!',
			pg->mapping, pb->ub->ub_uid);
	return 0;
}
static __always_inline
void put_refcounted_compound_page(struct page *page_head, struct page *page)
{
	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		unsigned long flags;

		/*
		 * @page_head wasn't a dangling pointer but it may not
		 * be a head page anymore by the time we obtain the
		 * lock. That is ok as long as it can't be freed from
		 * under us.
		 */
		flags = compound_lock_irqsave(page_head);
		if (unlikely(!PageTail(page))) {
			/* __split_huge_page_refcount run before us */
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head)) {
				/*
				 * The @page_head may have been freed
				 * and reallocated as a compound page
				 * of smaller order and then freed
				 * again. All we know is that it
				 * cannot have become: a THP page, a
				 * compound page of higher order, a
				 * tail page. That is because we
				 * still hold the refcount of the
				 * split THP tail and page_head was
				 * the THP head before the split.
				 */
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
out_put_single:
			if (put_page_testzero(page))
				__put_single_page(page);
			return;
		}
		VM_BUG_ON_PAGE(page_head != compound_head(page), page);
		/*
		 * We can release the refcount taken by
		 * get_page_unless_zero() now that
		 * __split_huge_page_refcount() is blocked on the
		 * compound_lock.
		 */
		if (put_page_testzero(page_head))
			VM_BUG_ON_PAGE(1, page_head);
		/* __split_huge_page_refcount will wait now */
		VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);
		atomic_dec(&page->_mapcount);
		VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head);
		VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
		compound_unlock_irqrestore(page_head, flags);

		if (put_page_testzero(page_head)) {
			if (PageHead(page_head))
				__put_compound_page(page_head);
			else
				__put_single_page(page_head);
		}
	} else {
		/* @page_head is a dangling pointer */
		VM_BUG_ON_PAGE(PageTail(page), page);
		goto out_put_single;
	}
}
static void put_compound_page(struct page *page)
{
	struct page *page_head;

	if (likely(!PageTail(page))) {
		if (put_page_testzero(page)) {
			/*
			 * By the time all refcounts have been released
			 * split_huge_page cannot run anymore from under us.
			 */
			if (PageHead(page))
				__put_compound_page(page);
			else
				__put_single_page(page);
		}
		return;
	}

	/* __split_huge_page_refcount can run under us */
	page_head = compound_trans_head(page);

	/*
	 * THP can not break up slab pages so avoid taking
	 * compound_lock() and skip the tail page refcounting (in
	 * _mapcount) too. Slab performs non-atomic bit ops on
	 * page->flags for better performance. In particular
	 * slab_unlock() in slub used to be a hot path. It is still
	 * hot on arches that do not support
	 * this_cpu_cmpxchg_double().
	 *
	 * If "page" is part of a slab or hugetlbfs page it cannot be
	 * splitted and the head page cannot change from under us. And
	 * if "page" is part of a THP page under splitting, if the
	 * head page pointed by the THP tail isn't a THP head anymore,
	 * we'll find PageTail clear after smp_rmb() and we'll treat
	 * it as a single page.
	 */
	if (!__compound_tail_refcounted(page_head)) {
		/*
		 * If "page" is a THP tail, we must read the tail page
		 * flags after the head page flags. The
		 * split_huge_page side enforces write memory barriers
		 * between clearing PageTail and before the head page
		 * can be freed and reallocated.
		 */
		smp_rmb();
		if (likely(PageTail(page))) {
			/*
			 * __split_huge_page_refcount cannot race
			 * here.
			 */
			VM_BUG_ON(!PageHead(page_head));
			VM_BUG_ON(page_mapcount(page) != 0);
			if (put_page_testzero(page_head)) {
				/*
				 * If this is the tail of a slab
				 * compound page, the tail pin must
				 * not be the last reference held on
				 * the page, because the PG_slab
				 * cannot be cleared before all tail
				 * pins (which skips the _mapcount
				 * tail refcounting) have been
				 * released. For hugetlbfs the tail
				 * pin may be the last reference on
				 * the page instead, because
				 * PageHeadHuge will not go away until
				 * the compound page enters the buddy
				 * allocator.
				 */
				VM_BUG_ON(PageSlab(page_head));
				__put_compound_page(page_head);
			}
			return;
		} else
			/*
			 * __split_huge_page_refcount run before us,
			 * "page" was a THP tail. The split page_head
			 * has been freed and reallocated as slab or
			 * hugetlbfs page of smaller order (only
			 * possible if reallocated as slab on x86).
			 */
			goto out_put_single;
	}

	if (likely(page != page_head && get_page_unless_zero(page_head))) {
		unsigned long flags;

		/*
		 * page_head wasn't a dangling pointer but it may not
		 * be a head page anymore by the time we obtain the
		 * lock. That is ok as long as it can't be freed from
		 * under us.
		 */
		flags = compound_lock_irqsave(page_head);
		if (unlikely(!PageTail(page))) {
			/* __split_huge_page_refcount run before us */
			compound_unlock_irqrestore(page_head, flags);
			if (put_page_testzero(page_head)) {
				/*
				 * The head page may have been freed
				 * and reallocated as a compound page
				 * of smaller order and then freed
				 * again. All we know is that it
				 * cannot have become: a THP page, a
				 * compound page of higher order, a
				 * tail page. That is because we
				 * still hold the refcount of the
				 * split THP tail and page_head was
				 * the THP head before the split.
				 */
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
out_put_single:
			if (put_page_testzero(page))
				__put_single_page(page);
			return;
		}
		VM_BUG_ON(page_head != page->first_page);
		/*
		 * We can release the refcount taken by
		 * get_page_unless_zero() now that
		 * __split_huge_page_refcount() is blocked on the
		 * compound_lock.
		 */
		if (put_page_testzero(page_head))
			VM_BUG_ON(1);
		/* __split_huge_page_refcount will wait now */
		VM_BUG_ON(page_mapcount(page) <= 0);
		atomic_dec(&page->_mapcount);
		VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
		VM_BUG_ON(atomic_read(&page->_count) != 0);
		compound_unlock_irqrestore(page_head, flags);

		if (put_page_testzero(page_head)) {
			if (PageHead(page_head))
				__put_compound_page(page_head);
			else
				__put_single_page(page_head);
		}
	} else {
		/* page_head is a dangling pointer */
		VM_BUG_ON(PageTail(page));
		goto out_put_single;
	}
}
/*
 * The pageout code holds an extra reference to the page, so the
 * reference count to test for becomes 2 for a page that is only in the
 * swap cache, plus 1 for each process that maps the page.
 */
int remove_exclusive_swap_page_ref(struct page *page)
{
	return remove_exclusive_swap_page_count(page, 2 + page_mapcount(page));
}
void my_dump_page(struct page *page, const char *msg)
{
	if (!msg)
		msg = "";
	if (page)
		printk(KERN_DEBUG "%s page #%lu: flags: %lx Count: %i, Mapcount: %i, "
		       "Mapping/private/first_page/... %p. Index: %lu. "
		       "lru.next: %p / prev: %p\n",
		       msg, page_to_pfn(page), page->flags,
		       page_count(page), page_mapcount(page),
		       page->mapping, page->index,
		       page->lru.next, page->lru.prev);
	else
		printk(KERN_DEBUG "%s page NULL\n", msg);
}
static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = compound_trans_head(page);

		if (likely(page != page_head &&
			   get_page_unless_zero(page_head))) {
			unsigned long flags;

			/*
			 * THP can not break up slab pages so avoid taking
			 * compound_lock(). Slab performs non-atomic bit ops
			 * on page->flags for better performance. In particular
			 * slab_unlock() in slub used to be a hot path. It is
			 * still hot on arches that do not support
			 * this_cpu_cmpxchg_double().
			 */
			if (PageSlab(page_head)) {
				if (PageTail(page)) {
					if (put_page_testzero(page_head))
						VM_BUG_ON(1);

					atomic_dec(&page->_mapcount);
					goto skip_lock_tail;
				} else
					goto skip_lock;
			}
			/*
			 * page_head wasn't a dangling pointer but it
			 * may not be a head page anymore by the time
			 * we obtain the lock. That is ok as long as it
			 * can't be freed from under us.
			 */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount run before us */
				compound_unlock_irqrestore(page_head, flags);
skip_lock:
				if (put_page_testzero(page_head))
					__put_single_page(page_head);
out_put_single:
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on
			 * the compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(page_mapcount(page) <= 0);
			atomic_dec(&page->_mapcount);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			VM_BUG_ON(atomic_read(&page->_count) != 0);
			compound_unlock_irqrestore(page_head, flags);

skip_lock_tail:
			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}
static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = compound_trans_head(page);

		if (likely(page != page_head &&
			   get_page_unless_zero(page_head))) {
			unsigned long flags;

			if (PageHeadHuge(page_head)) {
				if (likely(PageTail(page))) {
					/*
					 * __split_huge_page_refcount
					 * cannot race here.
					 */
					VM_BUG_ON(!PageHead(page_head));
					atomic_dec(&page->_mapcount);
					if (put_page_testzero(page_head))
						VM_BUG_ON(1);
					if (put_page_testzero(page_head))
						__put_compound_page(page_head);
					return;
				} else {
					/*
					 * __split_huge_page_refcount
					 * run before us, "page" was a
					 * THP tail. The split
					 * page_head has been freed
					 * and reallocated as slab or
					 * hugetlbfs page of smaller
					 * order (only possible if
					 * reallocated as slab on
					 * x86).
					 */
					goto skip_lock;
				}
			}
			/*
			 * page_head wasn't a dangling pointer but it
			 * may not be a head page anymore by the time
			 * we obtain the lock. That is ok as long as it
			 * can't be freed from under us.
			 */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount run before us */
				compound_unlock_irqrestore(page_head, flags);
				VM_BUG_ON(PageHead(page_head));
skip_lock:
				if (put_page_testzero(page_head)) {
					/*
					 * The head page may have been
					 * freed and reallocated as a
					 * compound page of smaller
					 * order and then freed again.
					 * All we know is that it
					 * cannot have become: a THP
					 * page, a compound page of
					 * higher order, a tail page.
					 * That is because we still
					 * hold the refcount of the
					 * split THP tail and
					 * page_head was the THP head
					 * before the split.
					 */
					if (PageHead(page_head))
						__put_compound_page(page_head);
					else
						__put_single_page(page_head);
				}
out_put_single:
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on
			 * the compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(page_mapcount(page) <= 0);
			atomic_dec(&page->_mapcount);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			VM_BUG_ON(atomic_read(&page->_count) != 0);
			compound_unlock_irqrestore(page_head, flags);

			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}
u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapcount() is not enough.
	 */
	if (!PageSlab(page) && page_mapcount(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;
	/*
	 * PageTransCompound can be true for non-huge compound pages (slab
	 * pages or pages allocated by drivers with __GFP_COMP) because it
	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
	 * to make sure a given page is a thp, not a non-huge compound page.
	 */
	else if (PageTransCompound(page)) {
		struct page *head = compound_head(page);

		if (PageLRU(head) || PageAnon(head))
			u |= 1 << KPF_THP;
		else if (is_huge_zero_page(head)) {
			u |= 1 << KPF_ZERO_PAGE;
			u |= 1 << KPF_THP;
		}
	} else if (is_zero_pfn(page_to_pfn(page)))
		u |= 1 << KPF_ZERO_PAGE;

	/*
	 * Caveats on high order pages: page->_count will only be set
	 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
	 * SLOB won't set PG_slab at all on compound pages.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;

	if (PageBalloon(page))
		u |= 1 << KPF_BALLOON;

	if (page_is_idle(page))
		u |= 1 << KPF_IDLE;

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);
	u |= kpf_copy_bit(k, KPF_SLAB,		PG_slab);
	u |= kpf_copy_bit(k, KPF_ERROR,		PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);
	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);
	u |= kpf_copy_bit(k, KPF_SWAPCACHE,	PG_swapcache);
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);
	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED,	PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK,	PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);

	return u;
}
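The computed bitmask is exported via /proc/kpageflags, again one u64 per PFN. A small user-space decoder sketch for a few of the bits; the KPF_* numbers below match include/uapi/linux/kernel-page-flags.h:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define KPF_LRU		5
#define KPF_MMAP	11
#define KPF_ANON	12
#define KPF_NOPAGE	20

int main(int argc, char **argv)
{
	unsigned long pfn = argc > 1 ? strtoul(argv[1], NULL, 0) : 0;
	uint64_t flags;
	FILE *f = fopen("/proc/kpageflags", "rb");

	if (!f) {
		perror("fopen");	/* usually needs root */
		return 1;
	}
	/* One 8-byte flags word per PFN, so seek to pfn * 8. */
	if (fseek(f, pfn * sizeof(flags), SEEK_SET) ||
	    fread(&flags, sizeof(flags), 1, f) != 1) {
		perror("read");
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("pfn %lu: %#llx%s%s%s%s\n", pfn, (unsigned long long)flags,
	       flags & (1ULL << KPF_NOPAGE) ? " nopage" : "",
	       flags & (1ULL << KPF_LRU) ? " lru" : "",
	       flags & (1ULL << KPF_MMAP) ? " mmap" : "",
	       flags & (1ULL << KPF_ANON) ? " anon" : "");
	return 0;
}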