Example #1
/*
 * Given a frontswap zpage in zcache (identified by type/offset) and
 * an empty page, put the page into the swap cache, use frontswap
 * to get the page from zcache into the empty page, then give it
 * to the swap subsystem to send to disk (carefully avoiding the
 * possibility that frontswap might snatch it back).
 * Returns < 0 if error, 0 if successful, and 1 if successful but
 * the newpage passed in is not needed and should be freed.
 */
static int zcache_frontswap_writeback_zpage(int type, pgoff_t offset,
					struct page *newpage)
{
	struct page *page = newpage;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	ret = zcache_get_swap_cache_page(type, offset, page);
	if (ret < 0)
		return ret;
	else if (ret == 0) {
		/* more uptodate page is already in swapcache */
		__frontswap_invalidate_page(type, offset);
		return 1;
	}

	BUG_ON(!frontswap_has_exclusive_gets); /* load must also invalidate */
	/* FIXME: how is it possible to get here when page is unlocked? */
	__frontswap_load(page);
	SetPageUptodate(page);  /* above does SetPageDirty, is that enough? */

	/* start writeback */
	SetPageReclaim(page);
	/*
	 * Return value is ignored here because it doesn't change anything
	 * for us.  Page is returned unlocked.
	 */
	(void)__swap_writepage(page, &wbc, zcache_end_swap_write);
	page_cache_release(page);
	inc_zcache_outstanding_writeback_pages();

	return 0;
}
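
A hypothetical caller for the routine above, following its return-value
contract (< 0 error, 0 success, 1 success but newpage unused). The name
zcache_evict_zpage() and the error-path ownership of newpage are assumptions
for illustration, not part of zcache.

static int zcache_evict_zpage(int type, pgoff_t offset)
{
	struct page *newpage;
	int ret;

	newpage = alloc_page(GFP_KERNEL);
	if (!newpage)
		return -ENOMEM;

	ret = zcache_frontswap_writeback_zpage(type, offset, newpage);
	if (ret == 1) {
		/* success, but newpage was not needed; the header comment
		 * above says it should be freed by the caller */
		__free_page(newpage);
		ret = 0;
	} else if (ret < 0) {
		/* error: ownership of newpage on failure is not spelled out
		 * above, so assume it is still ours to free */
		__free_page(newpage);
	}
	return ret;
}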
Example #2
/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * can be reclaimed ASAP using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because
 * the VM expects it to be written out by flusher threads, which is much
 * more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, void *arg)
{
	int lru, file;
	bool active;
	struct zone *zone = page_zone(page);

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);

	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);
	del_page_from_lru_list(zone, page, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(zone, page, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim can race with end_page_writeback(), which
		 * may confuse readahead.  But the race window is
		 * _really_ small and it is a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		struct lruvec *lruvec;
		/*
		 * The page's writeback ended while it was still in the pagevec,
		 * so move the page to the tail of the inactive list.
		 */
		lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(zone, page, file, 0);
}
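
For context, lru_deactivate_fn() is not called on pages one at a time from
arbitrary contexts; it is normally driven through a per-CPU pagevec, roughly
as in deactivate_page() in mm/swap.c of the same era. The sketch below is a
simplified reconstruction; exact helpers and locking details vary between
kernel versions.

/* per-CPU batch of pages queued for deactivation */
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

void deactivate_page(struct page *page)
{
	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		/*
		 * Once the pagevec is full, drain it: pagevec_lru_move_fn()
		 * takes the LRU lock and calls lru_deactivate_fn() on each
		 * queued page.
		 */
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}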
Example #3
/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device.  We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place.  After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	zpool_unmap_handle(pool, handle);
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		page_cache_release(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zpool_map_handle(zswap_pool, entry->handle,
				ZPOOL_MM_RO) + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,
				entry->length, dst, &dlen);
		kunmap_atomic(dst);
		zpool_unmap_handle(zswap_pool, entry->handle);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	page_cache_release(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	 * There are two possible situations for entry here:
	 * (1) refcount is 1 (normal case): entry is valid and on the tree
	 * (2) refcount is 0: entry was freed and removed from the tree
	 *     because an invalidate happened during writeback
	 * Search the tree and free the entry if it is still there.
	 */
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	 * If we get here due to ZSWAP_SWAPCACHE_EXIST, a load may be
	 * happening concurrently, so it is safe and okay not to free the
	 * entry here.  If the entry is freed by the following put, it is
	 * also okay to return a non-zero value.
	 */
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	return ret;
}
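
zswap_writeback_entry() itself is installed as the allocator's evict callback,
so writeback is driven by asking the zpool to shrink when the compressed pool
is full. The sketch below follows mm/zswap.c of the zpool era;
zswap_reclaim_one_entry() is an illustrative wrapper rather than an actual
zswap function, and zpool signatures differ between kernel versions.

static struct zpool_ops zswap_zpool_ops = {
	.evict = zswap_writeback_entry
};

/*
 * Hypothetical trigger, as used from the store path when the pool is full:
 * ask the allocator to evict one object; the allocator calls back into
 * zswap_writeback_entry() with the chosen handle.
 */
static int zswap_reclaim_one_entry(void)
{
	return zpool_shrink(zswap_pool, 1, NULL);
}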
Example #4
/*
 * Try to free buffers if "page" has them.
 */
static int
remap_preparepage(struct page *page, int fastmode)
{
    struct address_space *mapping;
    int waitcnt = fastmode ? 0 : 10;

    BUG_ON(!PageLocked(page));

    mapping = page_mapping(page);

    if (PageWriteback(page) && !PagePrivate(page) && !PageSwapCache(page)) {
        printk("remap: mapping %p page %p\n", page->mapping, page);
        return -REMAPPREP_WB;
    }

    if (PageWriteback(page))
        wait_on_page_writeback(page);

    if (PagePrivate(page)) {
#ifdef DEBUG_MSG
        printk("rmap: process page with buffers...\n");
#endif
        /* XXX copied from shrink_list() */
        if (PageDirty(page) &&
                is_page_cache_freeable(page) &&
                mapping != NULL &&
                mapping->a_ops->writepage != NULL) {
            spin_lock_irq(&mapping->tree_lock);
            if (clear_page_dirty_for_io(page)) {
                int res;
                struct writeback_control wbc = {
                    .sync_mode = WB_SYNC_NONE,
                    .nr_to_write = SWAP_CLUSTER_MAX,
                    .nonblocking = 1,
                    .for_reclaim = 1,
                };

                spin_unlock_irq(&mapping->tree_lock);

                SetPageReclaim(page);
                res = mapping->a_ops->writepage(page, &wbc);

                if (res < 0)
                    /* not implemented. help */
                    BUG();
                if (res == WRITEPAGE_ACTIVATE) {
                    ClearPageReclaim(page);
                    return -REMAPPREP_WB;
                }
                if (!PageWriteback(page)) {
                    /* synchronous write or broken a_ops? */
                    ClearPageReclaim(page);
                }
                lock_page(page);
                if (!PagePrivate(page))
                    return 0;
            } else
                spin_unlock_irq(&mapping->tree_lock);
        }

        while (1) {
            if (try_to_release_page(page, GFP_KERNEL))
                break;
            if (!waitcnt)
                return -REMAPPREP_BUFFER;
            msleep(10);
            waitcnt--;
            if (!waitcnt)
                print_buffer(page);
        }
    }