/*
 * Drop @count references from a page-fragment cache page, freeing the
 * page once the final reference is gone.  A page handed out by the
 * page-fragment allocator must never reach here with a zero refcount.
 */
void __page_frag_cache_drain(struct page *page, unsigned int count)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);

	if (!page_ref_sub_and_test(page, count))
		return;

	/*
	 * __free_pages_ok() is not exported, so restore a single
	 * reference and let __free_pages() — which drops one ref —
	 * perform the actual free at the page's compound order.
	 */
	page_ref_inc(page);
	__free_pages(page, compound_order(page));
}
/*
 * Release the mm's cached PTE-fragment page during context teardown.
 *
 * The low bits of mm->context.pte_frag encode how many fragments of the
 * backing page have already been handed out; the remaining
 * PTE_FRAG_NR - used references are still held by the cache.  Drop them
 * all, and destroy/free the page if that removed the last reference.
 */
static void destroy_pagetable_page(struct mm_struct *mm)
{
	void *frag = mm->context.pte_frag;
	struct page *page;
	int used;

	if (!frag)
		return;

	page = virt_to_page(frag);

	/* Fragments consumed so far, recovered from the pointer's offset. */
	used = ((unsigned long)frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;

	/* Drop every pending reference; bail out unless we held the last. */
	if (!page_ref_sub_and_test(page, PTE_FRAG_NR - used))
		return;

	pgtable_page_dtor(page);
	free_unref_page(page);
}