Пример #1
0
/*
 * Invalidate up to @count block addresses in dn->node_page starting at
 * dn->ofs_in_node.  Freed blocks are invalidated in SIT and the extent
 * cache is updated per-block.  dn->ofs_in_node is restored to its entry
 * value before returning.  Returns the number of blocks actually freed.
 */
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	int nr_free = 0, ofs = dn->ofs_in_node;
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		/* unallocated slot: nothing to free here */
		if (blkaddr == NULL_ADDR)
			continue;

		/* clear the on-node address before dropping SIT state */
		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		f2fs_update_extent_cache(dn);
		invalidate_blocks(sbi, blkaddr);
		/* the inode's first data block is gone; drop the hint flag */
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(F2FS_I(dn->inode),
						FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}
	if (nr_free) {
		/* account freed blocks and persist the dirtied node page */
		dec_valid_block_count(sbi, dn->inode, nr_free);
		set_page_dirty(dn->node_page);
		sync_inode_page(dn);
	}
	/* restore the caller-visible in-node offset */
	dn->ofs_in_node = ofs;

	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
	return nr_free;
}
Пример #2
0
/*
 * Write one data page described by @fio to disk.  Chooses between an
 * in-place update (IPU) of the existing block address and an out-of-place
 * write (OPU) to a newly allocated block.  Returns 0 on success or a
 * negative errno.
 */
int do_write_data_page(struct f2fs_io_info *fio)
{
    struct page *page = fio->page;
    struct inode *inode = page->mapping->host;
    struct dnode_of_data dn;
    int err = 0;

    /* look up the node entry recording this page's block address */
    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
    if (err)
        return err;

    fio->blk_addr = dn.data_blkaddr;

    /* This page is already truncated */
    if (fio->blk_addr == NULL_ADDR) {
        ClearPageUptodate(page);
        goto out_writepage;
    }

    /* encrypted regular files are written through a bounce page */
    if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
        fio->encrypted_page = f2fs_encrypt(inode, fio->page);
        if (IS_ERR(fio->encrypted_page)) {
            err = PTR_ERR(fio->encrypted_page);
            goto out_writepage;
        }
    }

    set_page_writeback(page);

    /*
     * If current allocation needs SSR,
     * it had better in-place writes for updated data.
     */
    if (unlikely(fio->blk_addr != NEW_ADDR &&
                 !is_cold_data(page) &&
                 need_inplace_update(inode))) {
        /* IPU: rewrite the block at its current address */
        rewrite_data_page(fio);
        set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
        trace_f2fs_do_write_data_page(page, IPU);
    } else {
        /* OPU: write to a new address, then update node/extent state */
        write_data_page(&dn, fio);
        set_data_blkaddr(&dn);
        f2fs_update_extent_cache(&dn);
        trace_f2fs_do_write_data_page(page, OPU);
        set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
        if (page->index == 0)
            set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
    }
out_writepage:
    f2fs_put_dnode(&dn);
    return err;
}
Пример #3
0
/*
 * Reserve one new data block for the slot described by @dn.  The slot is
 * marked NEW_ADDR (preallocated, not yet written) and the inode's valid
 * block count is charged.  Returns 0, -EPERM when allocation is forbidden,
 * or -ENOSPC when no space is left.
 */
int reserve_new_block(struct dnode_of_data *dn)
{
    struct inode *inode = dn->inode;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

    /* allocation is currently disabled on this inode */
    if (unlikely(is_inode_flag_set(F2FS_I(inode), FI_NO_ALLOC)))
        return -EPERM;

    /* charge one block against the quota before touching the node */
    if (unlikely(!inc_valid_block_count(sbi, inode, 1)))
        return -ENOSPC;

    trace_f2fs_reserve_new_block(inode, dn->nid, dn->ofs_in_node);

    /* record the reservation in the node page and sync it */
    dn->data_blkaddr = NEW_ADDR;
    set_data_blkaddr(dn);
    mark_inode_dirty(inode);
    sync_inode_page(dn);
    return 0;
}
Пример #4
0
/*
 * Invalidate up to @count block addresses in dn->node_page starting at
 * dn->ofs_in_node, then drop the extent cache over the whole affected
 * file range in one call.  dn->ofs_in_node is restored before returning.
 * Returns the number of blocks actually freed.
 */
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		/* unallocated slot: nothing to free here */
		if (blkaddr == NULL_ADDR)
			continue;

		/* clear the on-node address before dropping SIT state */
		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		/* the inode's first data block is gone; drop the hint flag */
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(F2FS_I(dn->inode),
						FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
						F2FS_I(dn->inode)) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
		set_page_dirty(dn->node_page);
		sync_inode_page(dn);
	}
	/* restore the caller-visible in-node offset */
	dn->ofs_in_node = ofs;

	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
	return nr_free;
}
Пример #5
0
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));

	/* direct IO doesn't use extent cache to maximize the performance */
	f2fs_drop_largest_extent(dn->inode, fofs);

	return 0;
}
Пример #6
0
/*
 * Move the inline data of dn->inode out to the regular data @page and
 * clear the inode's inline state.  @page must be the page at index 0.
 * Puts the dnode on success and returns 0; otherwise a negative errno.
 */
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
    void *src_addr, *dst_addr;
    struct f2fs_io_info fio = {
        .sbi = F2FS_I_SB(dn->inode),
        .type = DATA,
        .rw = WRITE_SYNC | REQ_PRIO,
        .page = page,
        .encrypted_page = NULL,
    };
    int dirty, err;

    /* inline data always maps to page index 0 */
    f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

    /* no inline payload: only flags/state need clearing */
    if (!f2fs_exist_data(dn->inode))
        goto clear_out;

    /* reserve data block 0 to receive the copied-out payload */
    err = f2fs_reserve_block(dn, 0);
    if (err)
        return err;

    f2fs_wait_on_page_writeback(page, DATA);

    if (PageUptodate(page))
        goto no_update;

    /* pad the area beyond the inline payload with zeroes */
    zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

    /* Copy the whole inline data block */
    src_addr = inline_data_addr(dn->inode_page);
    dst_addr = kmap_atomic(page);
    memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
    flush_dcache_page(page);
    kunmap_atomic(dst_addr);
    SetPageUptodate(page);
no_update:
    set_page_dirty(page);

    /* clear dirty state */
    dirty = clear_page_dirty_for_io(page);

    /* write data page to try to make data consistent */
    set_page_writeback(page);
    fio.blk_addr = dn->data_blkaddr;
    write_data_page(dn, &fio);
    set_data_blkaddr(dn);
    f2fs_update_extent_cache(dn);
    f2fs_wait_on_page_writeback(page, DATA);
    if (dirty)
        inode_dec_dirty_pages(dn->inode);

    /* this converted inline_data should be recovered. */
    set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

    /* clear inline data and flag after data writeback */
    truncate_inline_inode(dn->inode_page, 0);
clear_out:
    stat_dec_inline_inode(dn->inode);
    f2fs_clear_inline_inode(dn->inode);
    sync_inode_page(dn);
    f2fs_put_dnode(dn);
    return 0;
}

/*
 * Convert @inode's inline data into a regular data block, if the inode
 * still holds inline data by the time we take the op lock.
 * Returns 0 on success or a negative errno.
 */
int f2fs_convert_inline_inode(struct inode *inode)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct dnode_of_data dn;
    struct page *node_page, *data_page;
    int ret = 0;

    /* page index 0 will receive the inline payload */
    data_page = grab_cache_page(inode->i_mapping, 0);
    if (!data_page)
        return -ENOMEM;

    f2fs_lock_op(sbi);

    node_page = get_node_page(sbi, inode->i_ino);
    if (IS_ERR(node_page)) {
        ret = PTR_ERR(node_page);
    } else {
        set_new_dnode(&dn, inode, node_page, node_page, 0);

        /* re-check under the lock: someone may have converted already */
        if (f2fs_has_inline_data(inode))
            ret = f2fs_convert_inline_page(&dn, data_page);

        f2fs_put_dnode(&dn);
    }

    f2fs_unlock_op(sbi);

    f2fs_put_page(data_page, 1);
    return ret;
}

/*
 * Copy the contents of cached @page (index 0) back into the inode's
 * inline data area.  Returns 0 on success, -EAGAIN if the inode no
 * longer holds inline data, or a negative errno from the dnode lookup.
 */
int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
    struct dnode_of_data dn;
    void *inline_dst, *page_src;
    int err;

    /* locate the inode page that hosts the inline area */
    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
    if (err)
        return err;

    /* inline state vanished meanwhile: let the caller fall back */
    if (!f2fs_has_inline_data(inode)) {
        f2fs_put_dnode(&dn);
        return -EAGAIN;
    }

    /* inline data only ever lives at page index 0 */
    f2fs_bug_on(F2FS_I_SB(inode), page->index);

    f2fs_wait_on_page_writeback(dn.inode_page, NODE);

    /* copy the page cache contents into the inline area */
    page_src = kmap_atomic(page);
    inline_dst = inline_data_addr(dn.inode_page);
    memcpy(inline_dst, page_src, MAX_INLINE_DATA);
    kunmap_atomic(page_src);

    set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
    set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

    sync_inode_page(&dn);
    f2fs_put_dnode(&dn);
    return 0;
}
Пример #7
0
/*
 * Implement FALLOC_FL_ZERO_RANGE: zero the byte range [offset, offset+len).
 * Partial head/tail pages are zeroed through the page cache; fully covered
 * pages have their blocks invalidated and replaced with NEW_ADDR holes.
 * Unless FALLOC_FL_KEEP_SIZE is set, i_size grows to cover the range.
 * Returns 0 on success or a negative errno.
 *
 * Fixes vs. previous revision: removed a redundant i_size comparison that
 * duplicated the following max_t(), and cast pgoff_t values to loff_t
 * before shifting (pgoff_t is 32 bits on 32-bit systems, so the unconverted
 * shifts could overflow for large files).
 */
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	f2fs_balance_fs(sbi);

	/* inline data cannot be zeroed block-wise; convert it first */
	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		/* the whole range fits inside a single page */
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			/* zero the partial head page */
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_CACHE_SHIFT);
		}

		/* turn every fully covered page into a NEW_ADDR hole */
		for (index = pg_start; index < pg_end; index++) {
			struct dnode_of_data dn;
			struct page *ipage;

			f2fs_lock_op(sbi);

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				f2fs_unlock_op(sbi);
				goto out;
			}

			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, index);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			if (dn.data_blkaddr != NEW_ADDR) {
				invalidate_blocks(sbi, dn.data_blkaddr);

				dn.data_blkaddr = NEW_ADDR;
				set_data_blkaddr(&dn);

				/* cached extent no longer maps real data */
				dn.data_blkaddr = NULL_ADDR;
				f2fs_update_extent_cache(&dn);
			}
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			new_size = max_t(loff_t, new_size,
					(loff_t)(index + 1) << PAGE_CACHE_SHIFT);
		}

		if (off_end) {
			/* zero the partial tail page */
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	/* grow i_size unless the caller asked to keep it */
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	return ret;
}
Пример #8
0
/*
 * Collapse-range helper: for each page index in [end, nrpages) move its
 * block down to index @start, shifting file data left by (end - start)
 * pages.  Source blocks are detached via truncate_data_blocks_range() and
 * either re-linked at the destination or replaced through
 * f2fs_replace_block().  Returns 0 on success or a negative errno.
 */
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret = 0;

	for (; end < nrpages; start++, end++) {
		block_t new_addr, old_addr;

		f2fs_lock_op(sbi);

		/* fetch and detach the source block at index 'end' */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, end, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			/* no dnode: the source page is a hole */
			new_addr = NULL_ADDR;
		} else {
			new_addr = dn.data_blkaddr;
			truncate_data_blocks_range(&dn, 1);
			f2fs_put_dnode(&dn);
		}

		if (new_addr == NULL_ADDR) {
			/* source is a hole: punch the destination too */
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, start, LOOKUP_NODE_RA);
			if (ret && ret != -ENOENT) {
				goto out;
			} else if (ret == -ENOENT) {
				f2fs_unlock_op(sbi);
				continue;
			}

			if (dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_unlock_op(sbi);
				continue;
			} else {
				truncate_data_blocks_range(&dn, 1);
			}

			f2fs_put_dnode(&dn);
		} else {
			struct page *ipage;

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				goto out;
			}

			/* make sure the destination slot exists */
			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, start);
			if (ret)
				goto out;

			old_addr = dn.data_blkaddr;
			if (old_addr != NEW_ADDR && new_addr == NEW_ADDR) {
				/* moving a reserved-only block: drop the old one */
				dn.data_blkaddr = NULL_ADDR;
				f2fs_update_extent_cache(&dn);
				invalidate_blocks(sbi, old_addr);

				dn.data_blkaddr = new_addr;
				set_data_blkaddr(&dn);
			} else if (new_addr != NEW_ADDR) {
				struct node_info ni;

				/* swap the destination block for the source one */
				get_node_info(sbi, dn.nid, &ni);
				f2fs_replace_block(sbi, &dn, old_addr, new_addr,
							ni.version, true);
			}

			f2fs_put_dnode(&dn);
		}
		f2fs_unlock_op(sbi);
	}
	return 0;
out:
	f2fs_unlock_op(sbi);
	return ret;
}
Пример #9
0
/*
 * Move the block at page index @src to index @dst.  Non-checkpointed
 * blocks have their address transplanted via f2fs_replace_block();
 * checkpointed blocks are copied through the page cache instead.  When
 * @full is set a missing source punches a hole at @dst.  On failure the
 * source's block address is restored.  Returns 0 or a negative errno.
 */
static int __exchange_data_block(struct inode *inode, pgoff_t src,
					pgoff_t dst, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	block_t new_addr;
	bool do_replace = false;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		/* source dnode missing: treat the source as a hole */
		new_addr = NULL_ADDR;
	} else {
		new_addr = dn.data_blkaddr;
		/* only non-checkpointed blocks may be re-linked directly */
		if (!is_checkpointed_data(sbi, new_addr)) {
			dn.data_blkaddr = NULL_ADDR;
			/* do not invalidate this block address */
			set_data_blkaddr(&dn);
			f2fs_update_extent_cache(&dn);
			do_replace = true;
		}
		f2fs_put_dnode(&dn);
	}

	if (new_addr == NULL_ADDR)
		return full ? truncate_hole(inode, dst, dst + 1) : 0;

	if (do_replace) {
		/* fast path: transplant the block address to @dst */
		struct page *ipage = get_node_page(sbi, inode->i_ino);
		struct node_info ni;

		if (IS_ERR(ipage)) {
			ret = PTR_ERR(ipage);
			goto err_out;
		}

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, dst);
		if (ret)
			goto err_out;

		/* release whatever @dst currently points at */
		truncate_data_blocks_range(&dn, 1);

		get_node_info(sbi, dn.nid, &ni);
		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
				ni.version, true);
		f2fs_put_dnode(&dn);
	} else {
		/* slow path: copy the data through the page cache */
		struct page *psrc, *pdst;

		psrc = get_lock_data_page(inode, src, true);
		if (IS_ERR(psrc))
			return PTR_ERR(psrc);
		pdst = get_new_data_page(inode, NULL, dst, false);
		if (IS_ERR(pdst)) {
			f2fs_put_page(psrc, 1);
			return PTR_ERR(pdst);
		}
		f2fs_copy_page(psrc, pdst);
		set_page_dirty(pdst);
		f2fs_put_page(pdst, 1);
		f2fs_put_page(psrc, 1);

		return truncate_hole(inode, src, src + 1);
	}
	return 0;

err_out:
	/* roll back: restore the source block address cleared above */
	if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
		dn.data_blkaddr = new_addr;
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		f2fs_put_dnode(&dn);
	}
	return ret;
}
Пример #10
0
/*
 * fsync/fdatasync entry point.  Flushes dirty data pages, then either
 * triggers a full checkpoint (when needed for recoverability) or writes
 * the dirty node pages plus recovery info and issues a device flush.
 * Returns 0 on success or a negative errno.
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
    struct inode *inode = file->f_mapping->host;
    struct f2fs_inode_info *fi = F2FS_I(inode);
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    nid_t ino = inode->i_ino;
    int ret = 0;
    bool need_cp = false;
    struct writeback_control wbc = {
        .sync_mode = WB_SYNC_ALL,
        .nr_to_write = LONG_MAX,
        .for_reclaim = 0,
    };

    /* nothing can be dirty on a read-only filesystem */
    if (unlikely(f2fs_readonly(inode->i_sb)))
        return 0;

    trace_f2fs_sync_file_enter(inode);

    /* if fdatasync is triggered, let's do in-place-update */
    if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
        set_inode_flag(fi, FI_NEED_IPU);
    ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
    clear_inode_flag(fi, FI_NEED_IPU);

    if (ret) {
        trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
        return ret;
    }

    /* if the inode is dirty, let's recover all the time */
    if (!datasync && is_inode_flag_set(fi, FI_DIRTY_INODE)) {
        update_inode_page(inode);
        goto go_write;
    }

    /*
     * if there is no written data, don't waste time to write recovery info.
     */
    if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
            !exist_written_data(sbi, ino, APPEND_INO)) {

        /* it may call write_inode just prior to fsync */
        if (need_inode_page_update(sbi, ino))
            goto go_write;

        if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
                exist_written_data(sbi, ino, UPDATE_INO))
            goto flush_out;
        goto out;
    }
go_write:
    /* guarantee free sections for fsync */
    f2fs_balance_fs(sbi);

    /*
     * Both of fdatasync() and fsync() are able to be recovered from
     * sudden-power-off.
     */
    down_read(&fi->i_sem);
    need_cp = need_do_checkpoint(inode);
    up_read(&fi->i_sem);

    if (need_cp) {
        /* all the dirty node pages should be flushed for POR */
        ret = f2fs_sync_fs(inode->i_sb, 1);

        /*
         * We've secured consistency through sync_fs. Following pino
         * will be used only for fsynced inodes after checkpoint.
         */
        try_to_fix_pino(inode);
        clear_inode_flag(fi, FI_APPEND_WRITE);
        clear_inode_flag(fi, FI_UPDATE_WRITE);
        goto out;
    }
sync_nodes:
    sync_node_pages(sbi, ino, &wbc);

    /* if cp_error was enabled, we should avoid infinite loop */
    if (unlikely(f2fs_cp_error(sbi)))
        goto out;

    /* re-sync until the inode's node block stops going dirty */
    if (need_inode_block_update(sbi, ino)) {
        mark_inode_dirty_sync(inode);
        f2fs_write_inode(inode, NULL);
        goto sync_nodes;
    }

    ret = wait_on_node_pages_writeback(sbi, ino);
    if (ret)
        goto out;

    /* once recovery info is written, don't need to tack this */
    remove_dirty_inode(sbi, ino, APPEND_INO);
    clear_inode_flag(fi, FI_APPEND_WRITE);
flush_out:
    remove_dirty_inode(sbi, ino, UPDATE_INO);
    clear_inode_flag(fi, FI_UPDATE_WRITE);
    ret = f2fs_issue_flush(sbi);
out:
    trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
    f2fs_trace_ios(NULL, 1);
    return ret;
}

/*
 * Return the index of the first page tagged dirty at or after @pgofs,
 * or LONG_MAX when none exists.  Only meaningful for SEEK_DATA; any
 * other @whence yields 0.
 */
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
                                       pgoff_t pgofs, int whence)
{
    struct pagevec pvec;
    int found;

    /* only SEEK_DATA cares about dirty-but-unwritten pages */
    if (whence != SEEK_DATA)
        return 0;

    /* look up a single dirty-tagged page starting from @pgofs */
    pagevec_init(&pvec, 0);
    found = pagevec_lookup_tag(&pvec, mapping, &pgofs,
                               PAGECACHE_TAG_DIRTY, 1);
    pgofs = found ? pvec.pages[0]->index : LONG_MAX;
    pagevec_release(&pvec);
    return pgofs;
}

/*
 * Decide whether the block at page index @pgofs satisfies a SEEK_DATA or
 * SEEK_HOLE request.  @dirty is the index of the first dirty page, so a
 * NEW_ADDR (reserved, unwritten) block still counts as data when it is
 * backed by a dirty page.
 */
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
                           int whence)
{
    if (whence == SEEK_DATA)
        return (blkaddr == NEW_ADDR && dirty == pgofs) ||
               (blkaddr != NEW_ADDR && blkaddr != NULL_ADDR);

    if (whence == SEEK_HOLE)
        return blkaddr == NULL_ADDR;

    return false;
}

/* Non-zero when the file permits offsets outside loff_t's positive range. */
static inline int unsigned_offsets(struct file *file)
{
    return file->f_mode & FMODE_UNSIGNED_OFFSET;
}

/*
 * Validate @offset against @maxsize and the file's signedness rules, then
 * update the file position.  Resets f_version when the position changes.
 * Returns the new offset or -EINVAL.
 */
static loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
{
    /* reject negative offsets (unless allowed) and past-the-limit seeks */
    if ((offset < 0 && !unsigned_offsets(file)) || offset > maxsize)
        return -EINVAL;

    if (offset != file->f_pos) {
        file->f_pos = offset;
        file->f_version = 0;
    }
    return offset;
}

/*
 * Implement SEEK_DATA/SEEK_HOLE.  Walks the dnode blocks starting at
 * @offset and returns the first position satisfying @whence, counting
 * dirty-but-unwritten pages as data for SEEK_DATA.  Returns the new
 * position or -ENXIO when nothing matches.
 *
 * Fix vs. previous revision: the outer loop's update expression shifted
 * pgofs without a (loff_t) cast, unlike the inner loop; pgoff_t is 32 bits
 * on 32-bit systems, so the shift could overflow for large files.
 */
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
    struct inode *inode = file->f_mapping->host;
    loff_t maxbytes = inode->i_sb->s_maxbytes;
    struct dnode_of_data dn;
    pgoff_t pgofs, end_offset, dirty;
    loff_t data_ofs = offset;
    loff_t isize;
    int err = 0;

    mutex_lock(&inode->i_mutex);

    isize = i_size_read(inode);
    if (offset >= isize)
        goto fail;

    /* handle inline data case */
    if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
        if (whence == SEEK_HOLE)
            data_ofs = isize;
        goto found;
    }

    pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

    dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

    /* cast before shifting: pgoff_t may be narrower than loff_t */
    for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
        if (err && err != -ENOENT) {
            goto fail;
        } else if (err == -ENOENT) {
            /* direct node does not exists */
            if (whence == SEEK_DATA) {
                pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
                                            F2FS_I(inode));
                continue;
            } else {
                goto found;
            }
        }

        end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

        /* find data/hole in dnode block */
        for (; dn.ofs_in_node < end_offset;
                dn.ofs_in_node++, pgofs++,
                data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
            block_t blkaddr;
            blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

            if (__found_offset(blkaddr, dirty, pgofs, whence)) {
                f2fs_put_dnode(&dn);
                goto found;
            }
        }
        f2fs_put_dnode(&dn);
    }

    if (whence == SEEK_DATA)
        goto fail;
found:
    /* SEEK_HOLE never reports a position beyond EOF */
    if (whence == SEEK_HOLE && data_ofs > isize)
        data_ofs = isize;
    mutex_unlock(&inode->i_mutex);
    return vfs_setpos(file, data_ofs, maxbytes);
fail:
    mutex_unlock(&inode->i_mutex);
    return -ENXIO;
}

/*
 * llseek entry point: standard whence values go through the generic
 * helper, SEEK_DATA/SEEK_HOLE are handled by f2fs_seek_block().
 */
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
    struct inode *inode = file->f_mapping->host;
    loff_t maxbytes = inode->i_sb->s_maxbytes;

    switch (whence) {
    case SEEK_SET:
    case SEEK_CUR:
    case SEEK_END:
        /* positional seeks need no filesystem-specific logic */
        return generic_file_llseek_size(file, offset, whence,
                                        maxbytes);
    case SEEK_DATA:
    case SEEK_HOLE:
        /* a negative start offset can never match data or a hole */
        if (offset < 0)
            return -ENXIO;
        return f2fs_seek_block(file, offset, whence);
    default:
        return -EINVAL;
    }
}

/*
 * mmap entry point.  Inline data cannot be mapped directly, so it is
 * converted to a regular block first.  Returns 0 or a negative errno.
 */
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
    struct inode *inode = file_inode(file);
    int err;

    /* we don't need to use inline_data strictly */
    if (f2fs_has_inline_data(inode)) {
        err = f2fs_convert_inline_inode(inode);
        if (err)
            return err;
    }

    file_accessed(file);
    vma->vm_ops = &f2fs_file_vm_ops;
    return 0;
}

/*
 * Invalidate up to @count block addresses in dn->node_page starting at
 * dn->ofs_in_node.  Freed blocks are invalidated in SIT and the extent
 * cache is updated per-block.  dn->ofs_in_node is restored to its entry
 * value before returning.  Returns the number of blocks actually freed.
 */
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
    int nr_free = 0, ofs = dn->ofs_in_node;
    struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
    struct f2fs_node *raw_node;
    __le32 *addr;

    raw_node = F2FS_NODE(dn->node_page);
    addr = blkaddr_in_node(raw_node) + ofs;

    for (; count > 0; count--, addr++, dn->ofs_in_node++) {
        block_t blkaddr = le32_to_cpu(*addr);
        /* unallocated slot: nothing to free here */
        if (blkaddr == NULL_ADDR)
            continue;

        /* clear the on-node address before dropping SIT state */
        dn->data_blkaddr = NULL_ADDR;
        set_data_blkaddr(dn);
        f2fs_update_extent_cache(dn);
        invalidate_blocks(sbi, blkaddr);
        /* the inode's first data block is gone; drop the hint flag */
        if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
            clear_inode_flag(F2FS_I(dn->inode),
                             FI_FIRST_BLOCK_WRITTEN);
        nr_free++;
    }
    if (nr_free) {
        /* account freed blocks and persist the dirtied node page */
        dec_valid_block_count(sbi, dn->inode, nr_free);
        set_page_dirty(dn->node_page);
        sync_inode_page(dn);
    }
    /* restore the caller-visible in-node offset */
    dn->ofs_in_node = ofs;

    trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
                                          dn->ofs_in_node, nr_free);
    return nr_free;
}