Example #1
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	f2fs_balance_fs(sbi);

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA);
	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	return block_page_mkwrite_return(err);
}
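Every example in this collection revolves around the same idiom: take f2fs_lock_op() to exclude checkpointing, attach a dnode_of_data to the inode with set_new_dnode(), call f2fs_reserve_block() for the page index, then release the dnode and the lock. As a minimal sketch (reserve_one_block is a hypothetical helper distilled from Example #1, not code from any tree), the pattern reduces to:

static int reserve_one_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	f2fs_lock_op(sbi);			/* exclude checkpointing */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);	/* allocate if still a hole */
	if (!err)
		f2fs_put_dnode(&dn);		/* drop node/inode page refs */
	f2fs_unlock_op(sbi);
	return err;
}

Note the asymmetric error path: as in Example #1, f2fs_reserve_block() is assumed to release the dnode itself on failure, so the error path only drops the lock.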
Example #2
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	f2fs_balance_fs(sbi);

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	f2fs_lock_op(sbi);

	for (index = pg_start; index <= pg_end; index++) {
		struct dnode_of_data dn;

		if (index == pg_end && !off_end)
			goto noalloc;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_reserve_block(&dn, index);
		if (ret)
			break;
noalloc:
		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
		else if (index == pg_end)
			new_size = ((loff_t)index << PAGE_CACHE_SHIFT) + off_end;
		else
			new_size += PAGE_CACHE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}
	f2fs_unlock_op(sbi);

	return ret;
}
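The pg_start/pg_end and off_start/off_end arithmetic above splits a byte range into whole pages plus an unaligned head and tail. A standalone illustration of the same math (plain userspace C with a 4KB page, defining the two macros locally for the demo):

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

int main(void)
{
	unsigned long long offset = 5000, len = 10000;

	unsigned long pg_start = offset >> PAGE_CACHE_SHIFT;		/* 1 */
	unsigned long pg_end = (offset + len) >> PAGE_CACHE_SHIFT;	/* 3 */
	unsigned long off_start = offset & (PAGE_CACHE_SIZE - 1);	/* 904 */
	unsigned long off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); /* 2712 */

	printf("pages %lu..%lu, head offset %lu, tail offset %lu\n",
			pg_start, pg_end, off_start, off_end);
	return 0;
}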
Example #3
/*
 * NOTE: ipage is grabbed by caller, but if any error occurs, we should
 * release ipage in this function.
 */
static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	int err;

	page = grab_cache_page(dir->i_mapping, 0);
	if (!page) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	dentry_blk = kmap_atomic(page);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
					INLINE_DENTRY_BITMAP_SIZE);
	memset(dentry_blk->dentry_bitmap + INLINE_DENTRY_BITMAP_SIZE, 0,
			SIZE_OF_DENTRY_BITMAP - INLINE_DENTRY_BITMAP_SIZE);
	/*
	 * We do not need to zero out the remainder of the dentry and
	 * filename fields, since the bitmap tracks their usage status.
	 * We can also skip copying/zeroing the reserved space of the
	 * dentry block, because it has not been used so far.
	 */
	memcpy(dentry_blk->dentry, inline_dentry->dentry,
			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
	memcpy(dentry_blk->filename, inline_dentry->filename,
					NR_INLINE_DENTRY * F2FS_SLOT_LEN);

	kunmap_atomic(dentry_blk);
	SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	truncate_inline_inode(ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);

	if (i_size_read(dir) < PAGE_CACHE_SIZE) {
		i_size_write(dir, PAGE_CACHE_SIZE);
		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}

	sync_inode_page(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}
Example #4
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
    struct extent_info ei;
    struct inode *inode = dn->inode;

    if (f2fs_lookup_extent_cache(inode, index, &ei)) {
        dn->data_blkaddr = ei.blk + index - ei.fofs;
        return 0;
    }

    return f2fs_reserve_block(dn, index);
}
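f2fs_get_block() tries the extent cache first and only reserves a block on a miss. A hedged caller sketch (map_one_index is a hypothetical name; the unconditional f2fs_put_dnode() assumes, as the snippets here do, that it tolerates a dnode whose pages were never grabbed):

static int map_one_index(struct inode *inode, pgoff_t index)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_block(&dn, index);	/* cache hit or reserve */
	if (!err)
		f2fs_put_dnode(&dn);
	return err;
}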
Example #5
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
                               struct page *ipage, pgoff_t index, bool new_i_size)
{
    struct address_space *mapping = inode->i_mapping;
    struct page *page;
    struct dnode_of_data dn;
    int err;
repeat:
    page = grab_cache_page(mapping, index);
    if (!page) {
        /*
         * Before exiting, we should make sure ipage will be released
         * if any error occurs.
         */
        f2fs_put_page(ipage, 1);
        return ERR_PTR(-ENOMEM);
    }

    set_new_dnode(&dn, inode, ipage, NULL, 0);
    err = f2fs_reserve_block(&dn, index);
    if (err) {
        f2fs_put_page(page, 1);
        return ERR_PTR(err);
    }
    if (!ipage)
        f2fs_put_dnode(&dn);

    if (PageUptodate(page))
        goto got_it;

    if (dn.data_blkaddr == NEW_ADDR) {
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
    } else {
        f2fs_put_page(page, 1);

        page = get_read_data_page(inode, index, READ_SYNC);
        if (IS_ERR(page))
            goto repeat;

        /* wait for read completion */
        lock_page(page);
    }
got_it:
    if (new_i_size &&
            i_size_read(inode) < ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
        i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
        /* Only the directory inode sets new_i_size */
        set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
    }
    return page;
}
Example #6
static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
                                   struct f2fs_inline_dentry *inline_dentry)
{
    struct page *page;
    struct dnode_of_data dn;
    struct f2fs_dentry_block *dentry_blk;
    int err;

    page = grab_cache_page(dir->i_mapping, 0);
    if (!page) {
        f2fs_put_page(ipage, 1);
        return -ENOMEM;
    }

    set_new_dnode(&dn, dir, ipage, NULL, 0);
    err = f2fs_reserve_block(&dn, 0);
    if (err)
        goto out;

    f2fs_wait_on_page_writeback(page, DATA);
    zero_user_segment(page, 0, PAGE_CACHE_SIZE);

    dentry_blk = kmap_atomic(page);

    /* copy data from inline dentry block to new dentry block */
    memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
           INLINE_DENTRY_BITMAP_SIZE);
    memcpy(dentry_blk->dentry, inline_dentry->dentry,
           sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
    memcpy(dentry_blk->filename, inline_dentry->filename,
           NR_INLINE_DENTRY * F2FS_SLOT_LEN);

    kunmap_atomic(dentry_blk);
    SetPageUptodate(page);
    set_page_dirty(page);

    /* clear inline dir and flag after data writeback */
    truncate_inline_inode(ipage, 0);

    stat_dec_inline_dir(dir);
    clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);

    if (i_size_read(dir) < PAGE_CACHE_SIZE) {
        i_size_write(dir, PAGE_CACHE_SIZE);
        set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
    }

    sync_inode_page(&dn);
out:
    f2fs_put_page(page, 1);
    return err;
}
Example #7
File: inline.c Project: 7799/linux
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
	int err;
	struct page *ipage;
	struct dnode_of_data dn;
	void *src_addr, *dst_addr;
	block_t new_blk_addr;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

	f2fs_lock_op(sbi);
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		f2fs_unlock_op(sbi);
		return PTR_ERR(ipage);
	}

	/*
	 * i_addr[0] is not used for inline data,
	 * so reserving new block will not destroy inline data
	 */
	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err) {
		f2fs_unlock_op(sbi);
		return err;
	}

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	SetPageUptodate(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	write_data_page(page, &dn, &new_blk_addr, &fio);
	update_extent_cache(new_blk_addr, &dn);
	f2fs_wait_on_page_writeback(page, DATA);

	/* clear inline data and flag after data writeback */
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
	stat_dec_inline_inode(inode);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
	return err;
}

int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
{
	struct page *page;
	int err;

	if (!f2fs_has_inline_data(inode))
		return 0;
	else if (to_size <= MAX_INLINE_DATA)
		return 0;

	page = grab_cache_page_write_begin(inode->i_mapping, 0, AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	err = __f2fs_convert_inline_data(inode, page);
	f2fs_put_page(page, 1);
	return err;
}

int f2fs_write_inline_data(struct inode *inode,
			   struct page *page, unsigned size)
{
	void *src_addr, *dst_addr;
	struct page *ipage;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;
	ipage = dn.inode_page;

	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	src_addr = kmap(page);
	dst_addr = inline_data_addr(ipage);
	memcpy(dst_addr, src_addr, size);
	kunmap(page);

	/* Release the first data block if it is allocated */
	if (!f2fs_has_inline_data(inode)) {
		truncate_data_blocks_range(&dn, 1);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		stat_inc_inline_inode(inode);
	}

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	return 0;
}
Example #8
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
    void *src_addr, *dst_addr;
    struct f2fs_io_info fio = {
        .sbi = F2FS_I_SB(dn->inode),
        .type = DATA,
        .rw = WRITE_SYNC | REQ_PRIO,
        .page = page,
        .encrypted_page = NULL,
    };
    int dirty, err;

    f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

    if (!f2fs_exist_data(dn->inode))
        goto clear_out;

    err = f2fs_reserve_block(dn, 0);
    if (err)
        return err;

    f2fs_wait_on_page_writeback(page, DATA);

    if (PageUptodate(page))
        goto no_update;

    zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

    /* Copy the whole inline data block */
    src_addr = inline_data_addr(dn->inode_page);
    dst_addr = kmap_atomic(page);
    memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
    flush_dcache_page(page);
    kunmap_atomic(dst_addr);
    SetPageUptodate(page);
no_update:
    set_page_dirty(page);

    /* clear dirty state */
    dirty = clear_page_dirty_for_io(page);

    /* write data page to try to make data consistent */
    set_page_writeback(page);
    fio.blk_addr = dn->data_blkaddr;
    write_data_page(dn, &fio);
    set_data_blkaddr(dn);
    f2fs_update_extent_cache(dn);
    f2fs_wait_on_page_writeback(page, DATA);
    if (dirty)
        inode_dec_dirty_pages(dn->inode);

    /* this converted inline_data should be recovered. */
    set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

    /* clear inline data and flag after data writeback */
    truncate_inline_inode(dn->inode_page, 0);
clear_out:
    stat_dec_inline_inode(dn->inode);
    f2fs_clear_inline_inode(dn->inode);
    sync_inode_page(dn);
    f2fs_put_dnode(dn);
    return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct dnode_of_data dn;
    struct page *ipage, *page;
    int err = 0;

    page = grab_cache_page(inode->i_mapping, 0);
    if (!page)
        return -ENOMEM;

    f2fs_lock_op(sbi);

    ipage = get_node_page(sbi, inode->i_ino);
    if (IS_ERR(ipage)) {
        err = PTR_ERR(ipage);
        goto out;
    }

    set_new_dnode(&dn, inode, ipage, ipage, 0);

    if (f2fs_has_inline_data(inode))
        err = f2fs_convert_inline_page(&dn, page);

    f2fs_put_dnode(&dn);
out:
    f2fs_unlock_op(sbi);

    f2fs_put_page(page, 1);
    return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
    void *src_addr, *dst_addr;
    struct dnode_of_data dn;
    int err;

    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
    if (err)
        return err;

    if (!f2fs_has_inline_data(inode)) {
        f2fs_put_dnode(&dn);
        return -EAGAIN;
    }

    f2fs_bug_on(F2FS_I_SB(inode), page->index);

    f2fs_wait_on_page_writeback(dn.inode_page, NODE);
    src_addr = kmap_atomic(page);
    dst_addr = inline_data_addr(dn.inode_page);
    memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
    kunmap_atomic(src_addr);

    set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
    set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

    sync_inode_page(&dn);
    f2fs_put_dnode(&dn);
    return 0;
}
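A detail worth calling out in Example #8: f2fs_write_inline_data() returns -EAGAIN once the inode no longer carries inline data, telling the caller to fall back to the regular block path. A sketch of that contract (try_inline_write is a hypothetical wrapper; the actual fallback write is elided):

static int try_inline_write(struct inode *inode, struct page *page)
{
	int err = -EAGAIN;

	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);

	if (err == -EAGAIN) {
		/* inline slot gone: issue the regular block-based write */
		err = 0;	/* placeholder for that path */
	}
	return err;
}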
Example #9
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	f2fs_balance_fs(sbi);

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_CACHE_SHIFT);
		}

		for (index = pg_start; index < pg_end; index++) {
			struct dnode_of_data dn;
			struct page *ipage;

			f2fs_lock_op(sbi);

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				f2fs_unlock_op(sbi);
				goto out;
			}

			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, index);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			if (dn.data_blkaddr != NEW_ADDR) {
				invalidate_blocks(sbi, dn.data_blkaddr);

				dn.data_blkaddr = NEW_ADDR;
				set_data_blkaddr(&dn);

				dn.data_blkaddr = NULL_ADDR;
				f2fs_update_extent_cache(&dn);
			}
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			new_size = max_t(loff_t, new_size,
					(loff_t)(index + 1) << PAGE_CACHE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	return ret;
}
Example #10
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret = 0;

	for (; end < nrpages; start++, end++) {
		block_t new_addr, old_addr;

		f2fs_lock_op(sbi);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, end, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			new_addr = NULL_ADDR;
		} else {
			new_addr = dn.data_blkaddr;
			truncate_data_blocks_range(&dn, 1);
			f2fs_put_dnode(&dn);
		}

		if (new_addr == NULL_ADDR) {
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, start, LOOKUP_NODE_RA);
			if (ret && ret != -ENOENT) {
				goto out;
			} else if (ret == -ENOENT) {
				f2fs_unlock_op(sbi);
				continue;
			}

			if (dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_unlock_op(sbi);
				continue;
			} else {
				truncate_data_blocks_range(&dn, 1);
			}

			f2fs_put_dnode(&dn);
		} else {
			struct page *ipage;

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				goto out;
			}

			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, start);
			if (ret)
				goto out;

			old_addr = dn.data_blkaddr;
			if (old_addr != NEW_ADDR && new_addr == NEW_ADDR) {
				dn.data_blkaddr = NULL_ADDR;
				f2fs_update_extent_cache(&dn);
				invalidate_blocks(sbi, old_addr);

				dn.data_blkaddr = new_addr;
				set_data_blkaddr(&dn);
			} else if (new_addr != NEW_ADDR) {
				struct node_info ni;

				get_node_info(sbi, dn.nid, &ni);
				f2fs_replace_block(sbi, &dn, old_addr, new_addr,
							ni.version, true);
			}

			f2fs_put_dnode(&dn);
		}
		f2fs_unlock_op(sbi);
	}
	return 0;
out:
	f2fs_unlock_op(sbi);
	return ret;
}
Example #11
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t pg_start, pg_end, delta, nrpages, idx;
	loff_t new_size;
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	new_size = i_size_read(inode) + len;
	if (new_size > inode->i_sb->s_maxbytes)
		return -EFBIG;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	f2fs_balance_fs(sbi);

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, 0, offset);

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
	delta = pg_end - pg_start;
	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
		struct dnode_of_data dn;
		struct page *ipage;
		block_t new_addr, old_addr;

		f2fs_lock_op(sbi);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, idx, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			goto next;
		} else if (dn.data_blkaddr == NULL_ADDR) {
			f2fs_put_dnode(&dn);
			goto next;
		} else {
			new_addr = dn.data_blkaddr;
			truncate_data_blocks_range(&dn, 1);
			f2fs_put_dnode(&dn);
		}

		ipage = get_node_page(sbi, inode->i_ino);
		if (IS_ERR(ipage)) {
			ret = PTR_ERR(ipage);
			goto out;
		}

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, idx + delta);
		if (ret)
			goto out;

		old_addr = dn.data_blkaddr;
		f2fs_bug_on(sbi, old_addr != NEW_ADDR);

		if (new_addr != NEW_ADDR) {
			struct node_info ni;

			get_node_info(sbi, dn.nid, &ni);
			f2fs_replace_block(sbi, &dn, old_addr, new_addr,
							ni.version, true);
		}
		f2fs_put_dnode(&dn);
next:
		f2fs_unlock_op(sbi);
	}

	i_size_write(inode, new_size);
	return 0;
out:
	f2fs_unlock_op(sbi);
	return ret;
}
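The reverse walk in Example #11 (from nrpages - 1 down to pg_start) matters: when shifting mappings right by delta, moving from the highest index down prevents a destination slot from clobbering a source slot that has not been moved yet. A toy model in plain C (not kernel code; blk must have room for nrpages + delta entries):

#include <stddef.h>

static void shift_right(int *blk, size_t nrpages, size_t pg_start,
			size_t delta)
{
	/* idx-- > pg_start plays the role of the idx != -1 guard above
	 * when pg_start is 0 */
	for (size_t idx = nrpages; idx-- > pg_start; ) {
		blk[idx + delta] = blk[idx];	/* move mapping up */
		blk[idx] = 0;			/* old slot becomes a hole */
	}
}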
Example #12
File: inline.c Project: mdamt/linux
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
	};
	int dirty, err;

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));

	read_inline_data(page, dn->inode_page);
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.old_blkaddr = dn->data_blkaddr;
	set_inode_flag(dn->inode, FI_HOT_DATA);
	write_data_page(dn, &fio);
	f2fs_wait_on_page_writeback(page, DATA, true);
	if (dirty) {
		inode_dec_dirty_pages(dn->inode);
		remove_dirty_inode(dn->inode);
	}

	/* this converted inline_data should be recovered. */
	set_inode_flag(dn->inode, FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode, dn->inode_page, 0);
	clear_inline_node(dn->inode_page);
clear_out:
	stat_dec_inline_inode(dn->inode);
	clear_inode_flag(dn->inode, FI_INLINE_DATA);
	f2fs_put_dnode(dn);
	return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	if (!f2fs_has_inline_data(inode))
		return 0;

	page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);

	f2fs_balance_fs(sbi, dn.node_changed);

	return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);
	set_page_dirty(dn.inode_page);

	set_inode_flag(inode, FI_APPEND_WRITE);
	set_inode_flag(inode, FI_DATA_EXIST);

	clear_inline_node(dn.inode_page);
	f2fs_put_dnode(&dn);
	return 0;
}
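Examples #2, #9, and #11 all begin by converting an inline inode before touching individual blocks, and f2fs_convert_inline_inode() above is the entry point for that step. The recurring pre-step as a sketch (prepare_blocks is a hypothetical name):

static int prepare_blocks(struct inode *inode)
{
	if (f2fs_has_inline_data(inode))
		return f2fs_convert_inline_inode(inode);
	return 0;
}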
Example #13
static int __exchange_data_block(struct inode *inode, pgoff_t src,
					pgoff_t dst, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	block_t new_addr;
	bool do_replace = false;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		new_addr = NULL_ADDR;
	} else {
		new_addr = dn.data_blkaddr;
		if (!is_checkpointed_data(sbi, new_addr)) {
			dn.data_blkaddr = NULL_ADDR;
			/* do not invalidate this block address */
			set_data_blkaddr(&dn);
			f2fs_update_extent_cache(&dn);
			do_replace = true;
		}
		f2fs_put_dnode(&dn);
	}

	if (new_addr == NULL_ADDR)
		return full ? truncate_hole(inode, dst, dst + 1) : 0;

	if (do_replace) {
		struct page *ipage = get_node_page(sbi, inode->i_ino);
		struct node_info ni;

		if (IS_ERR(ipage)) {
			ret = PTR_ERR(ipage);
			goto err_out;
		}

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, dst);
		if (ret)
			goto err_out;

		truncate_data_blocks_range(&dn, 1);

		get_node_info(sbi, dn.nid, &ni);
		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
				ni.version, true);
		f2fs_put_dnode(&dn);
	} else {
		struct page *psrc, *pdst;

		psrc = get_lock_data_page(inode, src, true);
		if (IS_ERR(psrc))
			return PTR_ERR(psrc);
		pdst = get_new_data_page(inode, NULL, dst, false);
		if (IS_ERR(pdst)) {
			f2fs_put_page(psrc, 1);
			return PTR_ERR(pdst);
		}
		f2fs_copy_page(psrc, pdst);
		set_page_dirty(pdst);
		f2fs_put_page(pdst, 1);
		f2fs_put_page(psrc, 1);

		return truncate_hole(inode, src, src + 1);
	}
	return 0;

err_out:
	if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
		dn.data_blkaddr = new_addr;
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		f2fs_put_dnode(&dn);
	}
	return ret;
}
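__exchange_data_block() packages the move-one-block logic that Example #10 open-codes. A collapse-range caller might drive it roughly like this (a sketch modeled on Example #10's loop shape, not verified against any particular tree):

static int do_collapse_sketch(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret = 0;

	/* pull each block after the hole left by (end - start) pages */
	for (; end < nrpages; start++, end++) {
		f2fs_lock_op(sbi);
		ret = __exchange_data_block(inode, end, start, true);
		f2fs_unlock_op(sbi);
		if (ret)
			break;
	}
	return ret;
}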