bool recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE);

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);

		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return true;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));
		truncate_inline_inode(ipage, 0);
		f2fs_clear_inline_inode(inode);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		truncate_blocks(inode, 0, false);
		goto process_inline;
	}
	return false;
}
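
A minimal caller sketch (illustrative only, not code from the f2fs tree; the helper name is made up): the boolean result tells the roll-forward recovery loop whether any data blocks are left to recover for this node.

static int recover_one_sketch(struct inode *inode, struct page *npage)
{
	/*
	 * A "true" return means the inline payload was already copied
	 * into the inode page, so no data blocks remain to be recovered.
	 */
	if (recover_inline_data(inode, npage))
		return 0;

	/* otherwise, per-block recovery of npage would continue (elided) */
	return 0;
}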
Example #2
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode));

	if (f2fs_is_atomic_file(inode)) {
		commit_inmem_pages(inode, false);
		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
	}

	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
		filemap_fdatawrite(inode->i_mapping);
		set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
	}
	mnt_drop_write_file(filp);
	return ret;
}
Example #3
static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
		struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	int err;

	if (f2fs_encrypted_inode(dir) &&
		!f2fs_is_child_context_consistent_with_parent(dir, inode))
		return -EPERM;

	f2fs_balance_fs(sbi);

	inode->i_ctime = CURRENT_TIME;
	ihold(inode);

	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out;
	f2fs_unlock_op(sbi);

	d_instantiate(dentry, inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;
out:
	clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
	iput(inode);
	f2fs_unlock_op(sbi);
	return err;
}
Example #4
/*
 * NOTE: ipage is grabbed by the caller, but if any error occurs, we should
 * release ipage in this function.
 */
static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	int err;

	page = grab_cache_page(dir->i_mapping, 0);
	if (!page) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	dentry_blk = kmap_atomic(page);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
					INLINE_DENTRY_BITMAP_SIZE);
	memset(dentry_blk->dentry_bitmap + INLINE_DENTRY_BITMAP_SIZE, 0,
			SIZE_OF_DENTRY_BITMAP - INLINE_DENTRY_BITMAP_SIZE);
	/*
	 * We do not need to zero out the remainder of the dentry and filename
	 * fields, since the bitmap already marks their usage status; we can
	 * also skip copying/zeroing the reserved space of the dentry block,
	 * because it has not been used so far.
	 */
	memcpy(dentry_blk->dentry, inline_dentry->dentry,
			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
	memcpy(dentry_blk->filename, inline_dentry->filename,
					NR_INLINE_DENTRY * F2FS_SLOT_LEN);

	kunmap_atomic(dentry_blk);
	SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	truncate_inline_inode(ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);

	if (i_size_read(dir) < PAGE_CACHE_SIZE) {
		i_size_write(dir, PAGE_CACHE_SIZE);
		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}

	sync_inode_page(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}
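
A hedged caller sketch (the helper name and the retry step are assumptions, not from the source): a directory-entry insertion would fall back to this conversion once the inline dentry area has no free slot.

static int spill_then_retry_sketch(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	int err;

	/* no free inline slot left: spill into a regular dentry block */
	err = f2fs_convert_inline_dir(dir, ipage, inline_dentry);
	if (err)
		return err;	/* ipage is already released on error */

	/* the entry add would then be retried via the non-inline path */
	return 0;
}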
Example #5
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages(&inode->i_data, 0);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto no_delete;

	BUG_ON(atomic_read(&F2FS_I(inode)->dirty_dents));
	remove_dirty_dir_inode(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	f2fs_lock_op(sbi);
	remove_inode_page(inode);
	f2fs_unlock_op(sbi);
no_delete:
	end_writeback(inode);
}
Example #6
static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
		struct dentry *dentry)
{
	struct inode *inode = d_inode(old_dentry);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	int err;

	if (f2fs_encrypted_inode(dir) &&
			!fscrypt_has_permitted_context(dir, inode))
		return -EPERM;

	f2fs_balance_fs(sbi, true);

	inode->i_ctime = current_time(inode);
	ihold(inode);

	set_inode_flag(inode, FI_INC_LINK);
	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out;
	f2fs_unlock_op(sbi);

	d_instantiate(dentry, inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;
out:
	clear_inode_flag(inode, FI_INC_LINK);
	iput(inode);
	f2fs_unlock_op(sbi);
	return err;
}
Example #7
static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
                     struct dentry *dentry)
{
    struct inode *inode = old_dentry->d_inode;
    struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
    int err;

    f2fs_balance_fs(sbi);

    inode->i_ctime = CURRENT_TIME;
    ihold(inode);

    set_inode_flag(F2FS_I(inode), FI_INC_LINK);
    f2fs_lock_op(sbi);
    err = f2fs_add_link(dentry, inode);
    f2fs_unlock_op(sbi);
    if (err)
        goto out;

    d_instantiate(dentry, inode);
    return 0;
out:
    clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
    iput(inode);
    return err;
}
Example #8
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages(&inode->i_data, 0);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto no_delete;

	f2fs_bug_on(get_dirty_dents(inode));
	remove_dirty_dir_inode(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	f2fs_lock_op(sbi);
	remove_inode_page(inode);
	stat_dec_inline_inode(inode);
	f2fs_unlock_op(sbi);

no_delete:
	end_writeback(inode);
	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
}
Example #9
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (f2fs_is_volatile_file(inode))
		return 0;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (f2fs_is_atomic_file(inode)) {
		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
		ret = commit_inmem_pages(inode);
		if (ret) {
			set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
			goto err_out;
		}
	}

	ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);
err_out:
	mnt_drop_write_file(filp);
	return ret;
}
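
From userspace, the two atomic-write ioctls bracket a set of writes so they become durable as a unit at commit time. A minimal sketch, assuming the F2FS_IOC_* numbers are defined with _IO(0xf5, ...) as in the kernel's f2fs headers; error handling is reduced to the bare minimum.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* assumed to match the kernel-side f2fs ioctl definitions */
#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)

static int atomic_update(const char *path, const void *buf, size_t len)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	/* everything written between start and commit lands as one unit */
	if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) < 0 ||
	    write(fd, buf, len) != (ssize_t)len ||
	    ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}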
Example #10
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t xnid = fi->i_xattr_nid;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, true);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages(&inode->i_data, 0);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	remove_dirty_dir_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	set_inode_flag(fi, FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode, true);

	f2fs_lock_op(sbi);
	remove_inode_page(inode);
	f2fs_unlock_op(sbi);

no_delete:
	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (is_inode_flag_set(fi, FI_APPEND_WRITE))
		add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
	if (is_inode_flag_set(fi, FI_UPDATE_WRITE))
		add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
	if (is_inode_flag_set(fi, FI_FREE_NID)) {
		alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(fi, FI_FREE_NID);
	}
out_clear:
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	if (fi->i_crypt_info)
		f2fs_free_encryption_info(inode, fi->i_crypt_info);
#endif
	end_writeback(inode);
}
Example #11
/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * Clear nlink of the inode in order to release its resources
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * we must call this to avoid the inode remaining dirty, which would
	 * result in a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't mark the inode as bad, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so we can avoid losing this orphan if a
	 * checkpoint is followed by a sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}
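
A sketch of the calling convention stated in the header comment, mirroring the pattern f2fs_mkdir uses elsewhere in these examples (the wrapper name is illustrative): the creation path enters with f2fs_lock_op() held and, on failure, hands the half-built inode to f2fs_handle_failed_inode(), which drops the lock and the final reference.

static int link_new_inode_sketch(struct f2fs_sb_info *sbi,
				struct dentry *dentry, struct inode *inode)
{
	int err;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err) {
		/* drops f2fs_lock_op() and the last reference internally */
		f2fs_handle_failed_inode(inode);
		return err;
	}
	f2fs_unlock_op(sbi);
	return 0;
}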
Example #12
static void update_parent_metadata(struct inode *dir, struct inode *inode,
						unsigned int current_depth)
{
	if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
		if (S_ISDIR(inode->i_mode)) {
			inc_nlink(dir);
			set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
		}
		clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
	}
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	mark_inode_dirty(dir);

	if (F2FS_I(dir)->i_current_depth != current_depth) {
		F2FS_I(dir)->i_current_depth = current_depth;
		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}

	if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK))
		clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
}
Example #13
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, true);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
	}
	return 0;
}
Example #14
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, true);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages(&inode->i_data, 0);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	remove_dirty_dir_inode(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	f2fs_lock_op(sbi);
	remove_inode_page(inode);
	f2fs_unlock_op(sbi);

	sb_end_intwrite(inode->i_sb);
no_delete:
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	/* update extent info in inode */
	if (inode->i_nlink)
		f2fs_preserve_extent_tree(inode);
	f2fs_destroy_extent_tree(inode);

	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (is_inode_flag_set(F2FS_I(inode), FI_APPEND_WRITE))
		add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
	if (is_inode_flag_set(F2FS_I(inode), FI_UPDATE_WRITE))
		add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
out_clear:
	clear_inode(inode);
}
Example #15
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
                               struct page *ipage, pgoff_t index, bool new_i_size)
{
    struct address_space *mapping = inode->i_mapping;
    struct page *page;
    struct dnode_of_data dn;
    int err;
repeat:
    page = grab_cache_page(mapping, index);
    if (!page) {
        /*
         * before exiting, we should make sure ipage will be released
     * if any error occurs.
         */
        f2fs_put_page(ipage, 1);
        return ERR_PTR(-ENOMEM);
    }

    set_new_dnode(&dn, inode, ipage, NULL, 0);
    err = f2fs_reserve_block(&dn, index);
    if (err) {
        f2fs_put_page(page, 1);
        return ERR_PTR(err);
    }
    if (!ipage)
        f2fs_put_dnode(&dn);

    if (PageUptodate(page))
        goto got_it;

    if (dn.data_blkaddr == NEW_ADDR) {
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
    } else {
        f2fs_put_page(page, 1);

        page = get_read_data_page(inode, index, READ_SYNC);
        if (IS_ERR(page))
            goto repeat;

        /* wait for read completion */
        lock_page(page);
    }
got_it:
    if (new_i_size &&
            i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
        i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
        /* Only the directory inode sets new_i_size */
        set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
    }
    return page;
}
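
A short wrapper sketch of the contract in the header comment (the wrapper name is an assumption): the rwsem is taken via f2fs_lock_op(), no ipage is passed outside of make_empty_dir, and the returned page must be put by the caller.

static struct page *grab_new_page_sketch(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	f2fs_lock_op(sbi);
	/* NULL ipage: only make_empty_dir passes a pre-grabbed inode page */
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	/* on success the page is locked; caller must f2fs_put_page() it */
	return page;
}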
Example #16
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (f2fs_is_volatile_file(inode))
		return 0;

	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);

	return f2fs_convert_inline_inode(inode);
}
Example #17
static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
                                   struct f2fs_inline_dentry *inline_dentry)
{
    struct page *page;
    struct dnode_of_data dn;
    struct f2fs_dentry_block *dentry_blk;
    int err;

    page = grab_cache_page(dir->i_mapping, 0);
    if (!page) {
        f2fs_put_page(ipage, 1);
        return -ENOMEM;
    }

    set_new_dnode(&dn, dir, ipage, NULL, 0);
    err = f2fs_reserve_block(&dn, 0);
    if (err)
        goto out;

    f2fs_wait_on_page_writeback(page, DATA);
    zero_user_segment(page, 0, PAGE_CACHE_SIZE);

    dentry_blk = kmap_atomic(page);

    /* copy data from inline dentry block to new dentry block */
    memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
           INLINE_DENTRY_BITMAP_SIZE);
    memcpy(dentry_blk->dentry, inline_dentry->dentry,
           sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
    memcpy(dentry_blk->filename, inline_dentry->filename,
           NR_INLINE_DENTRY * F2FS_SLOT_LEN);

    kunmap_atomic(dentry_blk);
    SetPageUptodate(page);
    set_page_dirty(page);

    /* clear inline dir and flag after data writeback */
    truncate_inline_inode(ipage, 0);

    stat_dec_inline_dir(dir);
    clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);

    if (i_size_read(dir) < PAGE_CACHE_SIZE) {
        i_size_write(dir, PAGE_CACHE_SIZE);
        set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
    }

    sync_inode_page(&dn);
out:
    f2fs_put_page(page, 1);
    return err;
}
Example #18
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_NOFS | __GFP_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_dents, 0);
	fi->i_current_depth = 1;
	fi->i_advise = 0;
	rwlock_init(&fi->ext.ext_lock);

	set_inode_flag(fi, FI_NEW_INODE);

	if (test_opt(F2FS_SB(sb), INLINE_XATTR))
		set_inode_flag(fi, FI_INLINE_XATTR);

	return &fi->vfs_inode;
}
Example #19
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	f2fs_balance_fs(F2FS_I_SB(inode));

	if (f2fs_is_atomic_file(inode))
		return 0;

	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);

	return f2fs_convert_inline_inode(inode);
}
Example #20
int recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && ri->i_inline & F2FS_INLINE_DATA) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(IS_ERR(ipage));

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return -1;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(IS_ERR(ipage));
		zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
		clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && ri->i_inline & F2FS_INLINE_DATA) {
		truncate_blocks(inode, 0);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		goto process_inline;
	}
	return 0;
}
Example #21
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}
Example #22
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (f2fs_is_volatile_file(inode))
		return 0;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
Example #23
static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err;

	err = dquot_initialize(dir);
	if (err)
		return err;

	inode = f2fs_new_inode(dir, S_IFDIR | mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_dir_inode_operations;
	inode->i_fop = &f2fs_dir_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);

	set_inode_flag(inode, FI_INC_LINK);
	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out_fail;
	f2fs_unlock_op(sbi);

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);

	f2fs_balance_fs(sbi, true);
	return 0;

out_fail:
	clear_inode_flag(inode, FI_INC_LINK);
	handle_failed_inode(inode);
	return err;
}
Example #24
/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(get_dirty_dents(inode));
	remove_dirty_dir_inode(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
	i_size_write(inode, 0);

	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode);

	f2fs_lock_op(sbi);
	remove_inode_page(inode);
	stat_dec_inline_inode(inode);
	f2fs_unlock_op(sbi);

	sb_end_intwrite(inode->i_sb);
no_delete:
	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (is_inode_flag_set(F2FS_I(inode), FI_APPEND_WRITE))
		add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
	if (is_inode_flag_set(F2FS_I(inode), FI_UPDATE_WRITE))
		add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
out_clear:
	clear_inode(inode);
}
Example #25
/* caller should call f2fs_lock_op() */
void handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	clear_nlink(inode);
	make_bad_inode(inode);
	unlock_new_inode(inode);

	i_size_write(inode, 0);
	if (F2FS_HAS_BLOCKS(inode))
		f2fs_truncate(inode, false);

	remove_inode_page(inode);

	set_inode_flag(F2FS_I(inode), FI_FREE_NID);
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}
Example #26
int make_empty_inline_dir(struct inode *inode, struct inode *parent,
                          struct page *ipage)
{
    struct f2fs_inline_dentry *dentry_blk;
    struct f2fs_dentry_ptr d;

    dentry_blk = inline_data_addr(ipage);

    make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
    do_make_empty_dir(inode, parent, &d);

    set_page_dirty(ipage);

    /* update i_size to MAX_INLINE_DATA */
    if (i_size_read(inode) < MAX_INLINE_DATA) {
        i_size_write(inode, MAX_INLINE_DATA);
        set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
    }
    return 0;
}
Example #27
static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct inode *inode;
	int err;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFDIR | mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_dir_inode_operations;
	inode->i_fop = &f2fs_dir_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);

	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	f2fs_unlock_op(sbi);
	if (err)
		goto out_fail;

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	return 0;

out_fail:
	clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
	clear_nlink(inode);
	unlock_new_inode(inode);
	make_bad_inode(inode);
	iput(inode);
	alloc_nid_failed(sbi, inode->i_ino);
	return err;
}
Example #28
/* caller should call f2fs_lock_op() */
void handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0;

	clear_nlink(inode);
	make_bad_inode(inode);
	unlock_new_inode(inode);

	i_size_write(inode, 0);
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode, false);

	if (!err)
		err = remove_inode_page(inode);

	/*
	 * If we skipped truncate_node in remove_inode_page because we failed
	 * earlier, we had better find another way to release the resources of
	 * this inode (e.g. valid block count, node block or nid). Here we
	 * choose to add this inode to the orphan list, so that iput can
	 * release it in the orphan recovery flow.
	 *
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so we can avoid losing this orphan if a
	 * checkpoint is followed by a sudden power-off.
	 */
	if (err && err != -ENOENT) {
		err = acquire_orphan_inode(sbi);
		if (!err)
			add_orphan_inode(sbi, inode->i_ino);
	}

	set_inode_flag(F2FS_I(inode), FI_FREE_NID);
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}
Example #29
static int hmfs_link(struct dentry *old_dentry, struct inode *dir,
				struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct hmfs_sb_info *sbi = HMFS_I_SB(inode);
	int err, ilock;
	
	inode->i_ctime = CURRENT_TIME;
	ihold(inode);

	set_inode_flag(HMFS_I(inode), FI_INC_LINK);
	ilock = mutex_lock_op(sbi);
	err = hmfs_add_link(dentry, inode);
	mutex_unlock_op(sbi, ilock);
	if (err)
		goto out;
	d_instantiate(dentry, inode);
	return 0;
out:	
	clear_inode_flag(HMFS_I(inode), FI_INC_LINK);
	iput(inode);
	return err;
}
Example #30
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
	int err;
	struct page *ipage;
	struct dnode_of_data dn;
	void *src_addr, *dst_addr;
	block_t new_blk_addr;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

	f2fs_lock_op(sbi);
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		f2fs_unlock_op(sbi);
		return PTR_ERR(ipage);
	}

	/*
	 * i_addr[0] is not used for inline data,
	 * so reserving a new block will not destroy the inline data.
	 */
	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err) {
		f2fs_unlock_op(sbi);
		return err;
	}

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	SetPageUptodate(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	write_data_page(page, &dn, &new_blk_addr, &fio);
	update_extent_cache(new_blk_addr, &dn);
	f2fs_wait_on_page_writeback(page, DATA);

	/* clear inline data and flag after data writeback */
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
	stat_dec_inline_inode(inode);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
	return err;
}

int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
{
	struct page *page;
	int err;

	if (!f2fs_has_inline_data(inode))
		return 0;
	else if (to_size <= MAX_INLINE_DATA)
		return 0;

	page = grab_cache_page_write_begin(inode->i_mapping, 0, AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	err = __f2fs_convert_inline_data(inode, page);
	f2fs_put_page(page, 1);
	return err;
}
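
As an illustration of the threshold logic, a write path would pass the write's end offset so conversion happens only once the data would outgrow the inline area; the wrapper below is an assumption, not from the source.

static int prepare_write_sketch(struct inode *inode, loff_t pos, size_t len)
{
	/* a no-op while pos + len still fits within MAX_INLINE_DATA */
	return f2fs_convert_inline_data(inode, pos + len);
}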

int f2fs_write_inline_data(struct inode *inode,
			   struct page *page, unsigned size)
{
	void *src_addr, *dst_addr;
	struct page *ipage;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;
	ipage = dn.inode_page;

	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	src_addr = kmap(page);
	dst_addr = inline_data_addr(ipage);
	memcpy(dst_addr, src_addr, size);
	kunmap(page);

	/* Release the first data block if it is allocated */
	if (!f2fs_has_inline_data(inode)) {
		truncate_data_blocks_range(&dn, 1);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		stat_inc_inline_inode(inode);
	}

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	return 0;
}
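
Conversely, a writeback path could keep small files inline by routing them through f2fs_write_inline_data() while the file still fits; a minimal sketch with an assumed helper name, mirroring the signature above.

static int write_small_file_sketch(struct inode *inode, struct page *page)
{
	loff_t size = i_size_read(inode);

	if (f2fs_has_inline_data(inode) && size <= MAX_INLINE_DATA)
		return f2fs_write_inline_data(inode, page, size);

	/* larger files take the regular data-block writeback path (elided) */
	return 0;
}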