Example #1
int do_write_data_page(struct f2fs_io_info *fio)
{
    struct page *page = fio->page;
    struct inode *inode = page->mapping->host;
    struct dnode_of_data dn;
    int err = 0;

    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
    if (err)
        return err;

    fio->blk_addr = dn.data_blkaddr;

    /* This page is already truncated */
    if (fio->blk_addr == NULL_ADDR) {
        ClearPageUptodate(page);
        goto out_writepage;
    }

    if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
        fio->encrypted_page = f2fs_encrypt(inode, fio->page);
        if (IS_ERR(fio->encrypted_page)) {
            err = PTR_ERR(fio->encrypted_page);
            goto out_writepage;
        }
    }

    set_page_writeback(page);

    /*
     * If the current allocation would need SSR,
     * prefer an in-place update (IPU) for the rewritten data;
     * otherwise write out-of-place (OPU).
     */
    if (unlikely(fio->blk_addr != NEW_ADDR &&
                 !is_cold_data(page) &&
                 need_inplace_update(inode))) {
        rewrite_data_page(fio);
        set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
        trace_f2fs_do_write_data_page(page, IPU);
    } else {
        write_data_page(&dn, fio);
        set_data_blkaddr(&dn);
        f2fs_update_extent_cache(&dn);
        trace_f2fs_do_write_data_page(page, OPU);
        set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
        if (page->index == 0)
            set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
    }
out_writepage:
    f2fs_put_dnode(&dn);
    return err;
}
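
For context, here is a minimal, hypothetical sketch of a writeback-style caller that builds the f2fs_io_info consumed by do_write_data_page() above. The field names follow the struct as it appears in Examples #1 and #3; example_writepage itself is not an upstream function.

/* hypothetical caller sketch; fio fields taken from Examples #1 and #3 */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
    struct inode *inode = page->mapping->host;
    struct f2fs_io_info fio = {
        .sbi = F2FS_I_SB(inode),
        .type = DATA,
        .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
        .page = page,
        .encrypted_page = NULL,
    };

    /* do_write_data_page() resolves fio.blk_addr and issues the write */
    return do_write_data_page(&fio);
}
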
Example #2
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
	int err;
	struct page *ipage;
	struct dnode_of_data dn;
	void *src_addr, *dst_addr;
	block_t new_blk_addr;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

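	/* exclude checkpointing while the inline layout is converted */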
	f2fs_lock_op(sbi);
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		f2fs_unlock_op(sbi);
		return PTR_ERR(ipage);
	}

	/*
	 * i_addr[0] is not used for inline data,
	 * so reserving a new block will not destroy the inline data
	 */
	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err) {
		f2fs_unlock_op(sbi);
		return err;
	}

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	SetPageUptodate(page);

	/* write the data page to try to keep the data consistent */
	set_page_writeback(page);
	write_data_page(page, &dn, &new_blk_addr, &fio);
	update_extent_cache(new_blk_addr, &dn);
	f2fs_wait_on_page_writeback(page, DATA);

	/* clear inline data and flag after data writeback */
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
	stat_dec_inline_inode(inode);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
	return err;
}

int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
{
	struct page *page;
	int err;

	if (!f2fs_has_inline_data(inode))
		return 0;
	else if (to_size <= MAX_INLINE_DATA)
		return 0;

	page = grab_cache_page_write_begin(inode->i_mapping, 0, AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	err = __f2fs_convert_inline_data(inode, page);
	f2fs_put_page(page, 1);
	return err;
}
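
A hedged usage sketch for the wrapper above: a write path of this era would request conversion with the end offset of the pending write, which keeps the call a no-op while the write still fits in the inline area. The function below is illustrative, not upstream code.

/* illustrative only: convert before a write that may outgrow inline data */
static int example_prepare_write(struct inode *inode, loff_t pos, unsigned len)
{
	/*
	 * returns 0 right away if the inode has no inline data or
	 * if pos + len still fits within MAX_INLINE_DATA
	 */
	return f2fs_convert_inline_data(inode, pos + len);
}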

int f2fs_write_inline_data(struct inode *inode,
			   struct page *page, unsigned size)
{
	void *src_addr, *dst_addr;
	struct page *ipage;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;
	ipage = dn.inode_page;

	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	src_addr = kmap(page);
	dst_addr = inline_data_addr(ipage);
	memcpy(dst_addr, src_addr, size);
	kunmap(page);

	/* Release the first data block if it is allocated */
	if (!f2fs_has_inline_data(inode)) {
		truncate_data_blocks_range(&dn, 1);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		stat_inc_inline_inode(inode);
	}

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	return 0;
}
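
The inverse direction can be sketched as well: a caller would only re-inline page 0 of a file whose size fits the inline area. This helper is illustrative; the bounds check mirrors the constraints implied by f2fs_write_inline_data() above.

/* illustrative only: re-inline a small file on writeback */
static int example_try_inline(struct inode *inode, struct page *page)
{
	loff_t size = i_size_read(inode);

	/* only page 0 of a sufficiently small file can be inlined */
	if (page->index != 0 || size > MAX_INLINE_DATA)
		return -EAGAIN;

	return f2fs_write_inline_data(inode, page, (unsigned)size);
}
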
Example #3
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
    void *src_addr, *dst_addr;
    struct f2fs_io_info fio = {
        .sbi = F2FS_I_SB(dn->inode),
        .type = DATA,
        .rw = WRITE_SYNC | REQ_PRIO,
        .page = page,
        .encrypted_page = NULL,
    };
    int dirty, err;

    f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

    if (!f2fs_exist_data(dn->inode))
        goto clear_out;

    err = f2fs_reserve_block(dn, 0);
    if (err)
        return err;

    f2fs_wait_on_page_writeback(page, DATA);

    if (PageUptodate(page))
        goto no_update;

    zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

    /* Copy the whole inline data block */
    src_addr = inline_data_addr(dn->inode_page);
    dst_addr = kmap_atomic(page);
    memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
    flush_dcache_page(page);
    kunmap_atomic(dst_addr);
    SetPageUptodate(page);
no_update:
    set_page_dirty(page);

    /* clear dirty state */
    dirty = clear_page_dirty_for_io(page);

    /* write the data page to try to keep the data consistent */
    set_page_writeback(page);
    fio.blk_addr = dn->data_blkaddr;
    write_data_page(dn, &fio);
    set_data_blkaddr(dn);
    f2fs_update_extent_cache(dn);
    f2fs_wait_on_page_writeback(page, DATA);
    if (dirty)
        inode_dec_dirty_pages(dn->inode);

    /* the converted inline data must be covered by recovery */
    set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

    /* clear inline data and flag after data writeback */
    truncate_inline_inode(dn->inode_page, 0);
clear_out:
    stat_dec_inline_inode(dn->inode);
    f2fs_clear_inline_inode(dn->inode);
    sync_inode_page(dn);
    f2fs_put_dnode(dn);
    return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct dnode_of_data dn;
    struct page *ipage, *page;
    int err = 0;

    page = grab_cache_page(inode->i_mapping, 0);
    if (!page)
        return -ENOMEM;

    f2fs_lock_op(sbi);

    ipage = get_node_page(sbi, inode->i_ino);
    if (IS_ERR(ipage)) {
        err = PTR_ERR(ipage);
        goto out;
    }

    set_new_dnode(&dn, inode, ipage, ipage, 0);

    if (f2fs_has_inline_data(inode))
        err = f2fs_convert_inline_page(&dn, page);

    f2fs_put_dnode(&dn);
out:
    f2fs_unlock_op(sbi);

    f2fs_put_page(page, 1);
    return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
    void *src_addr, *dst_addr;
    struct dnode_of_data dn;
    int err;

    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
    if (err)
        return err;

    if (!f2fs_has_inline_data(inode)) {
        f2fs_put_dnode(&dn);
        return -EAGAIN;
    }

    f2fs_bug_on(F2FS_I_SB(inode), page->index);

    f2fs_wait_on_page_writeback(dn.inode_page, NODE);
    src_addr = kmap_atomic(page);
    dst_addr = inline_data_addr(dn.inode_page);
    memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
    kunmap_atomic(src_addr);

    set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
    set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

    sync_inode_page(&dn);
    f2fs_put_dnode(&dn);
    return 0;
}
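
The -EAGAIN return above is a contract with the caller: if the inode lost its inline flag between the check and the node lookup, the caller falls back to the regular block-based path. A hedged sketch of that fallback, reusing do_write_data_page() from Example #1:

/* illustrative fallback: -EAGAIN means "use the normal write path" */
static int example_do_write(struct f2fs_io_info *fio)
{
    struct inode *inode = fio->page->mapping->host;
    int err = -EAGAIN;

    if (f2fs_has_inline_data(inode))
        err = f2fs_write_inline_data(inode, fio->page);
    if (err == -EAGAIN)
        err = do_write_data_page(fio);
    return err;
}
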
Example #4
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
	};
	int dirty, err;

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));

	read_inline_data(page, dn->inode_page);
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write the data page to try to keep the data consistent */
	set_page_writeback(page);
	fio.old_blkaddr = dn->data_blkaddr;
	set_inode_flag(dn->inode, FI_HOT_DATA);
	write_data_page(dn, &fio);
	f2fs_wait_on_page_writeback(page, DATA, true);
	if (dirty) {
		inode_dec_dirty_pages(dn->inode);
		remove_dirty_inode(dn->inode);
	}

	/* the converted inline data must be covered by recovery */
	set_inode_flag(dn->inode, FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode, dn->inode_page, 0);
	clear_inline_node(dn->inode_page);
clear_out:
	stat_dec_inline_inode(dn->inode);
	clear_inode_flag(dn->inode, FI_INLINE_DATA);
	f2fs_put_dnode(dn);
	return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	if (!f2fs_has_inline_data(inode))
		return 0;

	page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);

	/* dn is only initialized once get_node_page() has succeeded */
	if (!err)
		f2fs_balance_fs(sbi, dn.node_changed);

	return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);
	set_page_dirty(dn.inode_page);

	set_inode_flag(inode, FI_APPEND_WRITE);
	set_inode_flag(inode, FI_DATA_EXIST);

	clear_inline_node(dn.inode_page);
	f2fs_put_dnode(&dn);
	return 0;
}
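
Finally, a hedged sketch of a typical call site for f2fs_convert_inline_inode(): operations that must work on real block addresses (preallocation, direct I/O and the like) force the conversion up front and propagate any error. The function name below is illustrative.

/* illustrative only: force conversion before an operation that needs blocks */
static int example_preallocate(struct inode *inode)
{
	int err;

	/* no-op when the inode holds no inline data */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	/* ... proceed with block-based allocation ... */
	return 0;
}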