/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error, i, nr = hpage_nr_pages(page);
	struct address_space *address_space;
	pgoff_t idx = swp_offset(entry);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	for (i = 0; i < nr; i++) {
		set_page_private(page + i, entry.val + i);
		error = radix_tree_insert(&address_space->page_tree,
					  idx + i, page + i);
		if (unlikely(error))
			break;
	}
	if (likely(!error)) {
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
	} else {
		/*
		 * Only the context which has set SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache().
		 * So add_to_swap_cache() doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		/* Unwind: clear private on the subpage that failed, then
		 * remove the slots that were already inserted.
		 */
		set_page_private(page + i, 0UL);
		while (i--) {
			radix_tree_delete(&address_space->page_tree, idx + i);
			set_page_private(page + i, 0UL);
		}
		ClearPageSwapCache(page);
		page_ref_sub(page, nr);
	}
	spin_unlock_irq(&address_space->tree_lock);

	return error;
}
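For context, __add_to_swap_cache() performs its radix-tree inserts under the irq-disabled tree_lock, so node allocations must be preloaded beforehand. Below is a minimal sketch of such a caller, assuming radix_tree_maybe_preload_order() is available in this tree; the exact wrapper body is an assumption for illustration, not necessarily the verbatim add_to_swap_cache() referenced in the comment above.

/*
 * Sketch of a caller (assumed, not verbatim): preload enough radix-tree
 * nodes for a possibly-huge page before __add_to_swap_cache() takes
 * tree_lock with interrupts disabled.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}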
static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
			    struct mlx4_en_rx_alloc *page_alloc,
			    const struct mlx4_en_frag_info *frag_info,
			    gfp_t _gfp)
{
	int order;
	struct page *page;
	dma_addr_t dma;

	for (order = frag_info->order; ;) {
		gfp_t gfp = _gfp;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
		page = alloc_pages(gfp, order);
		if (likely(page))
			break;
		if (--order < 0 ||
		    ((PAGE_SIZE << order) < frag_info->frag_size))
			return -ENOMEM;
	}
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
			   frag_info->dma_dir);
	if (unlikely(dma_mapping_error(priv->ddev, dma))) {
		put_page(page);
		return -ENOMEM;
	}
	page_alloc->page_size = PAGE_SIZE << order;
	page_alloc->page = page;
	page_alloc->dma = dma;
	page_alloc->page_offset = 0;
	/* Not doing get_page() for each frag is a big win
	 * on asymmetric workloads. Note we can not use atomic_set().
	 */
	page_ref_add(page, page_alloc->page_size / frag_info->frag_stride - 1);
	return 0;
}
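As a usage illustration: a caller typically sets up one mlx4_en_rx_alloc per RX fragment slot, and on failure it must drop the extra references that page_ref_add() pre-added before freeing the page. The sketch below is hypothetical (the function name, num_frags parameter, and array layout are assumptions for illustration, not the driver's actual ring-init code).

/*
 * Hypothetical caller, for illustration only: set up one page_alloc per
 * RX fragment and unwind cleanly on failure.  Names and layout here are
 * assumptions, not the driver's actual structures.
 */
static int example_init_rx_page_allocs(struct mlx4_en_priv *priv,
				       struct mlx4_en_rx_alloc *page_allocs,
				       const struct mlx4_en_frag_info *frag_info,
				       int num_frags)
{
	int i;

	for (i = 0; i < num_frags; i++) {
		if (mlx4_alloc_pages(priv, &page_allocs[i], &frag_info[i],
				     GFP_KERNEL))
			goto err;
	}
	return 0;

err:
	while (i--) {
		struct mlx4_en_rx_alloc *pa = &page_allocs[i];

		dma_unmap_page(priv->ddev, pa->dma, pa->page_size,
			       frag_info[i].dma_dir);
		/* Drop the extra references mlx4_alloc_pages() pre-added,
		 * then the allocation reference itself.
		 */
		page_ref_sub(pa->page,
			     pa->page_size / frag_info[i].frag_stride - 1);
		put_page(pa->page);
	}
	return -ENOMEM;
}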