void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       page, page_ref_count(page),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}
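/*
 * Hedged usage sketch (not from the original source): nilfs_page_bug() is a
 * diagnostic dump, so a caller would typically invoke it when a consistency
 * check on a page fails.  The helper name below is hypothetical.
 */
static void my_warn_if_no_buffers(struct page *page)
{
	if (unlikely(!page_has_buffers(page)))
		nilfs_page_bug(page);	/* dump refcount, flags and buffer heads */
}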
void __dump_page(struct page *page, const char *reason)
{
	struct address_space *mapping = page_mapping(page);
	bool page_poisoned = PagePoisoned(page);
	int mapcount;

	/*
	 * If struct page is poisoned don't access Page*() functions as that
	 * leads to recursive loop. Page*() check for poisoned pages, and calls
	 * dump_page() when detected.
	 */
	if (page_poisoned) {
		pr_warn("page:%px is uninitialized and poisoned", page);
		goto hex_only;
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	mapcount = PageSlab(page) ? 0 : page_mapcount(page);

	pr_warn("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
		page, page_ref_count(page), mapcount,
		page->mapping, page_to_pgoff(page));
	if (PageCompound(page))
		pr_cont(" compound_mapcount: %d", compound_mapcount(page));
	pr_cont("\n");
	if (PageAnon(page))
		pr_warn("anon ");
	else if (PageKsm(page))
		pr_warn("ksm ");
	else if (mapping) {
		pr_warn("%ps ", mapping->a_ops);
		if (mapping->host->i_dentry.first) {
			struct dentry *dentry;

			dentry = container_of(mapping->host->i_dentry.first,
					      struct dentry, d_u.d_alias);
			pr_warn("name:\"%pd\" ", dentry);
		}
	}
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);

hex_only:
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
		       sizeof(unsigned long), page,
		       sizeof(struct page), false);

	if (reason)
		pr_warn("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->mem_cgroup)
		pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}
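/*
 * Hedged sketch of a typical caller (illustrative, not part of the source):
 * in mainline, __dump_page() is reached through the exported dump_page()
 * wrapper, for example from a failed page-state assertion.  The helper name
 * below is hypothetical.
 */
static inline void my_assert_page_locked(struct page *page)
{
	if (unlikely(!PageLocked(page)))
		dump_page(page, "expected locked page");
}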
void __page_frag_cache_drain(struct page *page, unsigned int count)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);

	if (page_ref_sub_and_test(page, count)) {
		unsigned int order = compound_order(page);

		/*
		 * __free_pages_ok() is not exported, so call
		 * __free_pages() instead; it drops one reference
		 * itself, so take an extra reference first.
		 */
		page_ref_inc(page);
		__free_pages(page, order);
	}
}
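/*
 * Hedged usage sketch (assumption, not from the source): a driver holding
 * several references on a page-frag page can drop them in one call instead
 * of looping over put_page().  The structure and field names below are
 * illustrative.
 */
struct my_rx_frag {
	struct page *page;
	unsigned int pagecnt_bias;	/* references still owned by the driver */
};

static void my_rx_frag_release(struct my_rx_frag *frag)
{
	if (frag->page) {
		__page_frag_cache_drain(frag->page, frag->pagecnt_bias);
		frag->page = NULL;
	}
}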
void __dump_page(struct page *page, const char *reason)
{
	bool page_poisoned = PagePoisoned(page);
	int mapcount;

	/*
	 * If struct page is poisoned don't access Page*() functions as that
	 * leads to recursive loop. Page*() check for poisoned pages, and calls
	 * dump_page() when detected.
	 */
	if (page_poisoned) {
		pr_emerg("page:%px is uninitialized and poisoned", page);
		goto hex_only;
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode own info.
	 */
	mapcount = PageSlab(page) ? 0 : page_mapcount(page);

	pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
		 page, page_ref_count(page), mapcount,
		 page->mapping, page_to_pgoff(page));
	if (PageCompound(page))
		pr_cont(" compound_mapcount: %d", compound_mapcount(page));
	pr_cont("\n");
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);

hex_only:
	print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
		       sizeof(unsigned long), page,
		       sizeof(struct page), false);

	if (reason)
		pr_alert("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->mem_cgroup)
		pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
				  struct mlx4_en_rx_ring *ring)
{
	int i;
	struct mlx4_en_rx_alloc *page_alloc;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
				     frag_info, GFP_KERNEL | __GFP_COLD))
			goto out;

		en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n",
		       i, ring->page_alloc[i].page_size,
		       page_ref_count(ring->page_alloc[i].page));
	}
	return 0;

out:
	while (i--) {
		struct page *page;

		page_alloc = &ring->page_alloc[i];
		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size,
			       priv->frag_info[i].dma_dir);
		page = page_alloc->page;
		/* Revert changes done by mlx4_alloc_pages */
		page_ref_sub(page, page_alloc->page_size /
				   priv->frag_info[i].frag_stride - 1);
		put_page(page);
		page_alloc->page = NULL;
	}
	return -ENOMEM;
}
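/*
 * Hedged sketch of the refcount pattern that the error path above reverts
 * (names are illustrative, not taken from the driver): the allocator takes
 * one page reference per fragment that will be carved out of the page, so
 * each fragment handed to the stack owns exactly one reference.
 */
static struct page *my_alloc_frag_page(unsigned int page_size,
				       unsigned int frag_stride)
{
	struct page *page = alloc_pages(GFP_KERNEL, get_order(page_size));

	if (page)
		page_ref_add(page, page_size / frag_stride - 1);
	return page;
}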