Esempio n. 1
0
void f2fs_invalidate_page(struct page *page, unsigned int offset,
                          unsigned int length)
{
    struct inode *inode = page->mapping->host;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

    if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
            (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
        return;

    if (PageDirty(page)) {
        if (inode->i_ino == F2FS_META_INO(sbi))
            dec_page_count(sbi, F2FS_DIRTY_META);
        else if (inode->i_ino == F2FS_NODE_INO(sbi))
            dec_page_count(sbi, F2FS_DIRTY_NODES);
        else
            inode_dec_dirty_pages(inode);
    }

    /* This is atomic written page, keep Private */
    if (IS_ATOMIC_WRITTEN_PAGE(page))
        return;

    ClearPagePrivate(page);
}
/*
 * Remove @dentry from its dentry page.  It only clears the dentry slots
 * and bitmap in the dentry block; the corresponding name entry in the
 * name page does not need to be touched during deletion.  If the block
 * becomes completely empty, the whole dentry page is deallocated.
 *
 * @page is the page returned (still kmap'ed) by f2fs_find_entry(); it is
 * unmapped and released here.
 */
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
                       struct inode *dir, struct inode *inode)
{
    struct	f2fs_dentry_block *dentry_blk;
    unsigned int bit_pos;
    /* a long name occupies several consecutive dentry slots */
    int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
    int i;

    if (f2fs_has_inline_dentry(dir))
        return f2fs_delete_inline_entry(dentry, page, dir, inode);

    lock_page(page);
    f2fs_wait_on_page_writeback(page, DATA);

    dentry_blk = page_address(page);
    /* slot index of @dentry inside this dentry block */
    bit_pos = dentry - dentry_blk->dentry;
    for (i = 0; i < slots; i++)
        test_and_clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);

    /* Let's check and deallocate this dentry page */
    bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
                               NR_DENTRY_IN_BLOCK,
                               0);
    kunmap(page); /* kunmap - pair of f2fs_find_entry */
    set_page_dirty(page);

    dir->i_ctime = dir->i_mtime = CURRENT_TIME;

    if (inode)
        f2fs_drop_nlink(dir, inode, NULL);

    /* no used slot left in the block: drop the dentry page entirely */
    if (bit_pos == NR_DENTRY_IN_BLOCK) {
        truncate_hole(dir, page->index, page->index + 1);
        clear_page_dirty_for_io(page);
        ClearPagePrivate(page);
        ClearPageUptodate(page);
        inode_dec_dirty_pages(dir);
    }
    f2fs_put_page(page, 1);
}
/*
 * Migrate the inline data stored in @dn's inode page into a regular data
 * block backing page index 0 (@page), then clear the inline state.
 *
 * Returns 0 on success, or a negative errno if no block could be
 * reserved.  Consumes the dnode (f2fs_put_dnode) on success.
 */
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
    void *src_addr, *dst_addr;
    struct f2fs_io_info fio = {
        .sbi = F2FS_I_SB(dn->inode),
        .type = DATA,
        .rw = WRITE_SYNC | REQ_PRIO,
        .page = page,
        .encrypted_page = NULL,
    };
    int dirty, err;

    /* inline data always lives at file offset 0 */
    f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

    /* no inline payload at all: just drop the inline state */
    if (!f2fs_exist_data(dn->inode))
        goto clear_out;

    err = f2fs_reserve_block(dn, 0);
    if (err)
        return err;

    f2fs_wait_on_page_writeback(page, DATA);

    if (PageUptodate(page))
        goto no_update;

    /* the tail beyond the inline area has no backing data: zero it */
    zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

    /* Copy the whole inline data block */
    src_addr = inline_data_addr(dn->inode_page);
    dst_addr = kmap_atomic(page);
    memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
    flush_dcache_page(page);
    kunmap_atomic(dst_addr);
    SetPageUptodate(page);
no_update:
    set_page_dirty(page);

    /* clear dirty state */
    dirty = clear_page_dirty_for_io(page);

    /* write data page to try to make data consistent */
    set_page_writeback(page);
    fio.blk_addr = dn->data_blkaddr;
    write_data_page(dn, &fio);
    set_data_blkaddr(dn);
    f2fs_update_extent_cache(dn);
    f2fs_wait_on_page_writeback(page, DATA);
    if (dirty)
        inode_dec_dirty_pages(dn->inode);

    /* this converted inline_data should be recovered. */
    set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

    /* clear inline data and flag after data writeback */
    truncate_inline_inode(dn->inode_page, 0);
clear_out:
    stat_dec_inline_inode(dn->inode);
    f2fs_clear_inline_inode(dn->inode);
    sync_inode_page(dn);
    f2fs_put_dnode(dn);
    return 0;
}

/*
 * Convert an inline-data inode into one backed by a regular data block.
 *
 * Grabs page index 0, takes the op lock and hands off to
 * f2fs_convert_inline_page().  Returns 0 on success (including when the
 * inode holds no inline data) or a negative errno.
 */
int f2fs_convert_inline_inode(struct inode *inode)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct dnode_of_data dn;
    struct page *ipage, *page;
    int err = 0;

    /*
     * Fast path: nothing to convert.  Avoids grabbing the page and the
     * op lock; the flag is re-checked below under f2fs_lock_op() since
     * it may change before the lock is taken.
     */
    if (!f2fs_has_inline_data(inode))
        return 0;

    page = grab_cache_page(inode->i_mapping, 0);
    if (!page)
        return -ENOMEM;

    f2fs_lock_op(sbi);

    ipage = get_node_page(sbi, inode->i_ino);
    if (IS_ERR(ipage)) {
        err = PTR_ERR(ipage);
        goto out;
    }

    set_new_dnode(&dn, inode, ipage, ipage, 0);

    /* may have raced with another converter: re-check under the lock */
    if (f2fs_has_inline_data(inode))
        err = f2fs_convert_inline_page(&dn, page);

    f2fs_put_dnode(&dn);
out:
    f2fs_unlock_op(sbi);

    f2fs_put_page(page, 1);
    return err;
}

/*
 * Write back an inline-data inode: copy the contents of data page 0 into
 * the inline area of the inode page instead of issuing data-block I/O.
 *
 * Returns 0 on success, -EAGAIN if the inode lost its inline data in the
 * meantime (caller falls back to a regular data-page write, see
 * f2fs_write_data_page()), or a negative errno from the node lookup.
 */
int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
    void *src_addr, *dst_addr;
    struct dnode_of_data dn;
    int err;

    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
    if (err)
        return err;

    if (!f2fs_has_inline_data(inode)) {
        /* raced with a conversion: let the caller write a real page */
        f2fs_put_dnode(&dn);
        return -EAGAIN;
    }

    /* inline data only ever backs page index 0 */
    f2fs_bug_on(F2FS_I_SB(inode), page->index);

    f2fs_wait_on_page_writeback(dn.inode_page, NODE);
    src_addr = kmap_atomic(page);
    dst_addr = inline_data_addr(dn.inode_page);
    memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
    kunmap_atomic(src_addr);

    set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
    set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

    sync_inode_page(&dn);
    f2fs_put_dnode(&dn);
    return 0;
}
Esempio n. 4
0
/*
 * ->writepage implementation for f2fs data pages.
 *
 * Returns 0 when the page was written or legitimately skipped, and
 * AOP_WRITEPAGE_ACTIVATE after re-dirtying the page when it cannot be
 * written right now (recovery in progress, checkpoint error on a
 * directory, reclaim under free-section pressure, ...).
 */
static int f2fs_write_data_page(struct page *page,
                                struct writeback_control *wbc)
{
    struct inode *inode = page->mapping->host;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    loff_t i_size = i_size_read(inode);
    /* index of the page containing EOF */
    const pgoff_t end_index = ((unsigned long long) i_size)
                              >> PAGE_CACHE_SHIFT;
    unsigned offset = 0;
    bool need_balance_fs = false;
    int err = 0;
    struct f2fs_io_info fio = {
        .sbi = sbi,
        .type = DATA,
        .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
        .page = page,
        .encrypted_page = NULL,
    };

    trace_f2fs_writepage(page, DATA);

    if (page->index < end_index)
        goto write;

    /*
     * If the offset is out-of-range of file size,
     * this page does not have to be written to disk.
     */
    offset = i_size & (PAGE_CACHE_SIZE - 1);
    if ((page->index >= end_index + 1) || !offset)
        goto out;

    /* zero the part of the EOF page that lies beyond i_size */
    zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
    if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
        goto redirty_out;
    if (f2fs_is_drop_cache(inode))
        goto out;
    if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
            available_free_memory(sbi, BASE_CHECK))
        goto redirty_out;

    /* Dentry blocks are controlled by checkpoint */
    if (S_ISDIR(inode->i_mode)) {
        if (unlikely(f2fs_cp_error(sbi)))
            goto redirty_out;
        err = do_write_data_page(&fio);
        goto done;
    }

    /* we should bypass data pages to proceed the kworker jobs */
    if (unlikely(f2fs_cp_error(sbi))) {
        SetPageError(page);
        goto out;
    }

    if (!wbc->for_reclaim)
        need_balance_fs = true;
    else if (has_not_enough_free_secs(sbi, 0))
        goto redirty_out;

    /* try the inline-data path first; -EAGAIN means "not inline" */
    err = -EAGAIN;
    f2fs_lock_op(sbi);
    if (f2fs_has_inline_data(inode))
        err = f2fs_write_inline_data(inode, page);
    if (err == -EAGAIN)
        err = do_write_data_page(&fio);
    f2fs_unlock_op(sbi);
done:
    if (err && err != -ENOENT)
        goto redirty_out;

    clear_cold_data(page);
out:
    inode_dec_dirty_pages(inode);
    if (err)
        ClearPageUptodate(page);
    unlock_page(page);
    if (need_balance_fs)
        f2fs_balance_fs(sbi);
    if (wbc->for_reclaim)
        f2fs_submit_merged_bio(sbi, DATA, WRITE);
    return 0;

redirty_out:
    redirty_page_for_writepage(wbc, page);
    return AOP_WRITEPAGE_ACTIVATE;
}

/*
 * write_cache_pages() callback: forward the page to the mapping's own
 * ->writepage and record any failure on the mapping.
 */
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
                            void *data)
{
    struct address_space *mapping = data;
    int ret;

    ret = mapping->a_ops->writepage(page, wbc);
    mapping_set_error(mapping, ret);
    return ret;
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is making write step of cold data page separately from
 * warm/hot data page.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
                                  struct writeback_control *wbc, writepage_t writepage,
                                  void *data)
{
    int ret = 0;
    int done = 0;
    struct pagevec pvec;
    int nr_pages;
    pgoff_t uninitialized_var(writeback_index);
    pgoff_t index;
    pgoff_t end;		/* Inclusive */
    pgoff_t done_index;
    int cycled;
    int range_whole = 0;
    int tag;
    int step = 0; /* pass counter: pages are split across 2 passes below */

    pagevec_init(&pvec, 0);
next:
    /* (re)compute the index window; re-run for the second pass too */
    if (wbc->range_cyclic) {
        writeback_index = mapping->writeback_index; /* prev offset */
        index = writeback_index;
        if (index == 0)
            cycled = 1;
        else
            cycled = 0;
        end = -1;
    } else {
        index = wbc->range_start >> PAGE_CACHE_SHIFT;
        end = wbc->range_end >> PAGE_CACHE_SHIFT;
        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
            range_whole = 1;
        cycled = 1; /* ignore range_cyclic tests */
    }
    if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
        tag = PAGECACHE_TAG_TOWRITE;
    else
        tag = PAGECACHE_TAG_DIRTY;
retry:
    if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
        tag_pages_for_writeback(mapping, index, end);
    done_index = index;
    while (!done && (index <= end)) {
        int i;

        nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
                                      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
        if (nr_pages == 0)
            break;

        for (i = 0; i < nr_pages; i++) {
            struct page *page = pvec.pages[i];

            if (page->index > end) {
                done = 1;
                break;
            }

            done_index = page->index;

            lock_page(page);

            /* page was truncated or migrated away from this mapping */
            if (unlikely(page->mapping != mapping)) {
continue_unlock:
                unlock_page(page);
                continue;
            }

            if (!PageDirty(page)) {
                /* someone wrote it for us */
                goto continue_unlock;
            }

            /*
             * Two-pass split on is_cold_data(): a page whose coldness
             * matches the current step is left for the other pass.
             * NOTE(review): which temperature goes first depends on
             * is_cold_data()'s return value — confirm against its def.
             */
            if (step == is_cold_data(page))
                goto continue_unlock;

            if (PageWriteback(page)) {
                if (wbc->sync_mode != WB_SYNC_NONE)
                    f2fs_wait_on_page_writeback(page, DATA);
                else
                    goto continue_unlock;
            }

            BUG_ON(PageWriteback(page));
            if (!clear_page_dirty_for_io(page))
                goto continue_unlock;

            ret = (*writepage)(page, wbc, data);
            if (unlikely(ret)) {
                if (ret == AOP_WRITEPAGE_ACTIVATE) {
                    /* page re-dirtied and kept; not a real error */
                    unlock_page(page);
                    ret = 0;
                } else {
                    done_index = page->index + 1;
                    done = 1;
                    break;
                }
            }

            if (--wbc->nr_to_write <= 0 &&
                    wbc->sync_mode == WB_SYNC_NONE) {
                done = 1;
                break;
            }
        }
        pagevec_release(&pvec);
        cond_resched();
    }

    /* run the second temperature pass over the same range */
    if (step < 1) {
        step++;
        goto next;
    }

    /* cyclic writeback wrapped around: scan the mapping head once */
    if (!cycled && !done) {
        cycled = 1;
        index = 0;
        end = writeback_index - 1;
        goto retry;
    }
    if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
        mapping->writeback_index = done_index;

    return ret;
}
Esempio n. 5
0
File: inline.c Project: mdamt/linux
/*
 * Migrate the inline data in @dn's inode page into the regular data
 * block backing page index 0 (@page), then clear the inline state.
 *
 * Returns 0 on success, or a negative errno if no block could be
 * reserved.  Consumes the dnode (f2fs_put_dnode) on success.
 */
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
	};
	int dirty, err;

	/* no inline payload at all: just drop the inline state */
	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));

	/* fill @page from the inline area and mark it dirty */
	read_inline_data(page, dn->inode_page);
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.old_blkaddr = dn->data_blkaddr;
	set_inode_flag(dn->inode, FI_HOT_DATA);
	write_data_page(dn, &fio);
	f2fs_wait_on_page_writeback(page, DATA, true);
	if (dirty) {
		inode_dec_dirty_pages(dn->inode);
		remove_dirty_inode(dn->inode);
	}

	/* this converted inline_data should be recovered. */
	set_inode_flag(dn->inode, FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode, dn->inode_page, 0);
	clear_inline_node(dn->inode_page);
clear_out:
	stat_dec_inline_inode(dn->inode);
	clear_inode_flag(dn->inode, FI_INLINE_DATA);
	f2fs_put_dnode(dn);
	return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	if (!f2fs_has_inline_data(inode))
		return 0;

	page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);

	f2fs_balance_fs(sbi, dn.node_changed);

	return err;
}

/*
 * Write back an inline-data inode: copy the contents of data page 0
 * into the inline area of the inode page instead of issuing data I/O.
 *
 * Returns 0 on success, -EAGAIN if the inode is no longer inline (the
 * caller should fall back to a regular data-page write), or a negative
 * errno from the node lookup.
 */
int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		/* raced with a conversion: let the caller retry non-inline */
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	/* inline data only ever backs page index 0 */
	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);
	set_page_dirty(dn.inode_page);

	set_inode_flag(inode, FI_APPEND_WRITE);
	set_inode_flag(inode, FI_DATA_EXIST);

	clear_inline_node(dn.inode_page);
	f2fs_put_dnode(&dn);
	return 0;
}