Example #1
static void end_swap_bio_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}
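
The Set/Clear/TestClear accessors used throughout these examples are not open-coded; they are generated by the PAGEFLAG family of macros in include/linux/page-flags.h. A simplified sketch of what the generated helpers for PG_reclaim boil down to (the real macros also emit PageReclaim() and the non-atomic __Set/__Clear variants):

static inline void SetPageReclaim(struct page *page)
{
	set_bit(PG_reclaim, &page->flags);
}

static inline void ClearPageReclaim(struct page *page)
{
	clear_bit(PG_reclaim, &page->flags);
}

static inline int TestClearPageReclaim(struct page *page)
{
	return test_and_clear_bit(PG_reclaim, &page->flags);
}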
Example #2
/*
 * Dirty a page.
 *
 * For pages with a mapping this should be done under the page lock
 * for the benefit of asynchronous memory errors, which prefer a consistent
 * dirty state. This rule can be broken in some special cases,
 * but it is better not to.
 *
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		/*
		 * readahead/lru_deactivate_page could leave PG_readahead/PG_reclaim
		 * set due to a race with end_page_writeback().
		 * For readahead: if the page is written, the flag is reset, so
		 * there is no problem.
		 * For lru_deactivate_page: if the page is redirtied, the flag is
		 * reset, so there is no problem. But if the page is later used for
		 * readahead, the stale flag confuses readahead and makes it
		 * restart its size ramp-up. That is only a minor problem.
		 */
		ClearPageReclaim(page);
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
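
As the dispatch above shows, a filesystem opts in by installing its own hook in its address_space_operations; leaving .set_page_dirty NULL selects the __set_page_dirty_buffers fallback under CONFIG_BLOCK. A minimal sketch of the wiring ("demo_aops" is a made-up name; __set_page_dirty_nobuffers is the stock helper for filesystems that do not use buffer_heads):

static const struct address_space_operations demo_aops = {
	.set_page_dirty	= __set_page_dirty_nobuffers,
	/* .readpage, .writepage, ... */
};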
Example #3
/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));

	ClearPageReclaim(page);
	if (mapping && mapping_cap_account_dirty(mapping)) {
		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point. We do this by having them hold the
		 * page lock at some point after installing their
		 * pte, but before marking the page dirty.
		 * Pages are always locked coming in here, so we get
		 * the desired exclusion. See mm/memory.c:do_wp_page()
		 * for more comments.
		 */
		if (TestClearPageDirty(page)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
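
A hedged sketch of the typical caller pattern, loosely modeled on the kernel's write_one_page()/pageout() paths ("demo_write_locked_page" is an illustrative name, not from the source): the caller holds the page lock, clear_page_dirty_for_io() decides whether writeout is needed, and ->writepage() is expected to set writeback state and unlock the page itself, as the comment block above describes.

static int demo_write_locked_page(struct page *page,
				  struct writeback_control *wbc)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));

	if (clear_page_dirty_for_io(page))
		return mapping->a_ops->writepage(page, wbc); /* sets writeback, unlocks */

	unlock_page(page);	/* page was already clean */
	return 0;
}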
Example #4
static int tux3_set_page_dirty_bug(struct page *page)
{
	/* See the comment in tux3_set_page_dirty() */
	ClearPageReclaim(page);

	assert(0);
	/* This page should not be mmapped */
	assert(!page_mapped(page));
	/* This page should be dirty already; otherwise we would lose data. */
	assert(PageDirty(page));
	return 0;
}
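
Since this variant fires assert(0) unconditionally, it only makes sense as a trap: installed as the ->set_page_dirty hook of an address space whose pages tux3 dirties exclusively through its own paths, so any stray caller is caught immediately. A hypothetical wiring (the aops name and the other hooks are made up for illustration):

static const struct address_space_operations tux3_demo_aops = {
	.set_page_dirty	= tux3_set_page_dirty_bug,
	/* .readpage, .writepage, ... tux3-specific hooks */
};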
Example #5
/* Copy of set_page_dirty() */
static int tux3_set_page_dirty(struct page *page)
{
	/*
	 * readahead/lru_deactivate_page could leave PG_readahead/PG_reclaim
	 * set due to a race with end_page_writeback().
	 * For readahead: if the page is written, the flag is reset, so
	 * there is no problem.
	 * For lru_deactivate_page: if the page is redirtied, the flag is
	 * reset, so there is no problem. But if the page is later used for
	 * readahead, the stale flag confuses readahead and makes it
	 * restart its size ramp-up. That is only a minor problem.
	 */
	ClearPageReclaim(page);

	return tux3_set_page_dirty_buffers(page);
}
Example #6
static int tux3_set_page_dirty_assert(struct page *page)
{
	struct buffer_head *head, *buffer;

	/* See the comment in tux3_set_page_dirty() */
	ClearPageReclaim(page);

	/* Are there any cases where this is called for the old page of a forked page? */
	WARN_ON(PageForked(page));

	/* This page should be dirty already; otherwise we would lose data. */
	assert(PageDirty(page));
	/* All buffers should be dirty already; otherwise we would lose data. */
	assert(page_has_buffers(page));
	head = buffer = page_buffers(page);
	do {
		assert(buffer_dirty(buffer));
		buffer = buffer->b_this_page;
	} while (buffer != head);

	return 0;
}
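
The do/while loop above walks the page's circular buffer ring via b_this_page until it returns to the head. A hedged sketch of a setup path that would satisfy these asserts, using stock buffer_head helpers from <linux/buffer_head.h> ("demo_dirty_page_buffers" and its blocksize parameter are illustrative):

static void demo_dirty_page_buffers(struct page *page, unsigned blocksize)
{
	struct buffer_head *head, *bh;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	head = bh = page_buffers(page);
	do {
		mark_buffer_dirty(bh);	/* dirties the buffer and its page */
		bh = bh->b_this_page;
	} while (bh != head);
}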
Example #7
void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (bio->bi_error) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
			 imajor(bio->bi_bdev->bd_inode),
			 iminor(bio->bi_bdev->bd_inode),
			 (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}
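
This is the bi_error-era (v4.3+) form of Example #1: the completion callback no longer takes an error argument, and the sector lives in bio->bi_iter. A hedged sketch of how the handler gets attached, loosely modeled on get_swap_bio()/__swap_writepage() in mm/page_io.c of the same era:

	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
	bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;	/* page offset -> 512B sectors */
	bio->bi_end_io = end_swap_bio_write;		/* the handler above */
	bio_add_page(bio, page, PAGE_SIZE, 0);

	set_page_writeback(page);
	unlock_page(page);
	submit_bio(WRITE, bio);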
Example #8
/*
 * Try to free buffers if "page" has them.
 */
static int
remap_preparepage(struct page *page, int fastmode)
{
    struct address_space *mapping;
    int waitcnt = fastmode ? 0 : 10;

    BUG_ON(!PageLocked(page));

    mapping = page_mapping(page);

    if (PageWriteback(page) && !PagePrivate(page) && !PageSwapCache(page)) {
        printk("remap: mapping %p page %p\n", page->mapping, page);
        return -REMAPPREP_WB;
    }

    if (PageWriteback(page))
        wait_on_page_writeback(page);

    if (PagePrivate(page)) {
#ifdef DEBUG_MSG
        printk("rmap: process page with buffers...\n");
#endif
        /* XXX copied from shrink_list() */
        if (PageDirty(page) &&
                is_page_cache_freeable(page) &&
                mapping != NULL &&
                mapping->a_ops->writepage != NULL) {
            spin_lock_irq(&mapping->tree_lock);
            if (clear_page_dirty_for_io(page)) {
                int res;
                struct writeback_control wbc = {
                    .sync_mode = WB_SYNC_NONE,
                    .nr_to_write = SWAP_CLUSTER_MAX,
                    .nonblocking = 1,
                    .for_reclaim = 1,
                };

                spin_unlock_irq(&mapping->tree_lock);

                SetPageReclaim(page);
                res = mapping->a_ops->writepage(page, &wbc);

                if (res < 0)
                    /* not implemented. help */
                    BUG();
                if (res == WRITEPAGE_ACTIVATE) {
                    ClearPageReclaim(page);
                    return -REMAPPREP_WB;
                }
                if (!PageWriteback(page)) {
                    /* synchronous write or broken a_ops? */
                    ClearPageReclaim(page);
                }
                lock_page(page);
                if (!PagePrivate(page))
                    return 0;
            } else
                spin_unlock_irq(&mapping->tree_lock);
        }

        while (1) {
            if (try_to_release_page(page, GFP_KERNEL))
                break;
            if (!waitcnt)
                return -REMAPPREP_BUFFER;
            msleep(10);
            waitcnt--;
            if (!waitcnt)
                print_buffer(page);
        }
    }