Code example #1
File: recovery.c  Project: avagin/linux
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO,
				"recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else {
		/* restore s_flags to let iput() trash data */
		sbi->sb->s_flags = s_flags;
	}
skip:
	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	} else {
		clear_sbi_flag(sbi, SBI_POR_DOING);
	}
	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}
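
For context, here is a minimal sketch of how a mount-time caller can drive this entry point, based only on the return contract visible above: with check_only the function returns 1 if fsync'd dnodes exist, 0 if there is nothing to recover, and a negative errno on failure. The helper name and the -EROFS policy below are illustrative assumptions, not code from avagin/linux:

/* Hypothetical caller sketch: probe first, then recover, mirroring the
 * check_only contract of f2fs_recover_fsync_data() above. */
static int sketch_mount_recovery(struct f2fs_sb_info *sbi, bool norecovery)
{
	int err;

	if (norecovery) {
		/* probe only: no on-disk state is modified */
		err = f2fs_recover_fsync_data(sbi, true);
		if (err > 0)
			return -EROFS;	/* assumed policy: refuse to drop fsync'd data */
		return err;
	}

	/* full roll-forward recovery; on success the function itself
	 * writes a CP_RECOVERY checkpoint (see need_writecp above) */
	return f2fs_recover_fsync_data(sbi, false);
}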
Code example #2
File: data.c  Project: aejsmith/linux
/*
 * This function was originally taken from fs/mpage.c and customized for f2fs.
 * The major change is that f2fs uses block_size == page_size by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
                                struct list_head *pages, struct page *page,
                                unsigned nr_pages)
{
    struct bio *bio = NULL;
    unsigned page_idx;
    sector_t last_block_in_bio = 0;
    struct inode *inode = mapping->host;
    const unsigned blkbits = inode->i_blkbits;
    const unsigned blocksize = 1 << blkbits;
    sector_t block_in_file;
    sector_t last_block;
    sector_t last_block_in_file;
    sector_t block_nr;
    struct block_device *bdev = inode->i_sb->s_bdev;
    struct f2fs_map_blocks map;

    map.m_pblk = 0;
    map.m_lblk = 0;
    map.m_len = 0;
    map.m_flags = 0;

    for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

        if (pages) {
            page = list_entry(pages->prev, struct page, lru);
            list_del(&page->lru);
            if (add_to_page_cache_lru(page, mapping,
                                      page->index, GFP_KERNEL))
                goto next_page;
        }
        /* prefetch ->flags only once page is known to be valid;
         * in the readpages path the incoming page argument is NULL */
        prefetchw(&page->flags);

        block_in_file = (sector_t)page->index;
        last_block = block_in_file + nr_pages;
        last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
                             blkbits;
        if (last_block > last_block_in_file)
            last_block = last_block_in_file;

        /*
         * Map blocks using the previous result first.
         */
        if ((map.m_flags & F2FS_MAP_MAPPED) &&
                block_in_file > map.m_lblk &&
                block_in_file < (map.m_lblk + map.m_len))
            goto got_it;

        /*
         * Then do more f2fs_map_blocks() calls until we are
         * done with this page.
         */
        map.m_flags = 0;

        if (block_in_file < last_block) {
            map.m_lblk = block_in_file;
            map.m_len = last_block - block_in_file;

            if (f2fs_map_blocks(inode, &map, 0, false))
                goto set_error_page;
        }
got_it:
        if ((map.m_flags & F2FS_MAP_MAPPED)) {
            block_nr = map.m_pblk + block_in_file - map.m_lblk;
            SetPageMappedToDisk(page);

            if (!PageUptodate(page) && !cleancache_get_page(page)) {
                SetPageUptodate(page);
                goto confused;
            }
        } else {
            zero_user_segment(page, 0, PAGE_CACHE_SIZE);
            SetPageUptodate(page);
            unlock_page(page);
            goto next_page;
        }

        /*
         * This page will go to BIO.  Do we need to send this
         * BIO off first?
         */
        if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
            submit_bio(READ, bio);
            bio = NULL;
        }
        if (bio == NULL) {
            struct f2fs_crypto_ctx *ctx = NULL;

            if (f2fs_encrypted_inode(inode) &&
                    S_ISREG(inode->i_mode)) {
                struct page *cpage;

                ctx = f2fs_get_crypto_ctx(inode);
                if (IS_ERR(ctx))
                    goto set_error_page;

                /* wait for the page to be moved by cleaning (GC) */
                cpage = find_lock_page(
                            META_MAPPING(F2FS_I_SB(inode)),
                            block_nr);
                if (cpage) {
                    f2fs_wait_on_page_writeback(cpage,
                                                DATA);
                    f2fs_put_page(cpage, 1);
                }
            }

            bio = bio_alloc(GFP_KERNEL,
                            min_t(int, nr_pages, BIO_MAX_PAGES));
            if (!bio) {
                if (ctx)
                    f2fs_release_crypto_ctx(ctx);
                goto set_error_page;
            }
            bio->bi_bdev = bdev;
            bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
            bio->bi_end_io = f2fs_read_end_io;
            bio->bi_private = ctx;
        }

        if (bio_add_page(bio, page, blocksize, 0) < blocksize)
            goto submit_and_realloc;

        last_block_in_bio = block_nr;
        goto next_page;
set_error_page:
        SetPageError(page);
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        unlock_page(page);
        goto next_page;
confused:
        if (bio) {
            submit_bio(READ, bio);
            bio = NULL;
        }
        unlock_page(page);
next_page:
        if (pages)
            page_cache_release(page);
    }
    BUG_ON(pages && !list_empty(pages));
    if (bio)
        submit_bio(READ, bio);
    return 0;
}
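
This function intentionally serves both address_space operations: the ->readpage path passes a single, already-cached locked page with pages == NULL, while the ->readpages readahead path passes a list of not-yet-cached pages with page == NULL (which is why the loop above pulls pages off the list and calls add_to_page_cache_lru() itself). A minimal sketch of the two expected wrappers, with the inline-data special cases of the real data.c elided:

static int f2fs_read_data_page(struct file *file, struct page *page)
{
    /* single-page path: the page is already in the page cache */
    return f2fs_mpage_readpages(page->mapping, NULL, page, 1);
}

static int f2fs_read_data_pages(struct file *file,
                                struct address_space *mapping,
                                struct list_head *pages, unsigned nr_pages)
{
    /* readahead path: pages arrive on a list and are inserted into
     * the page cache inside f2fs_mpage_readpages() */
    return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}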