Example #1
File: inode.c Project: aejsmith/linux
void update_inode(struct inode *inode, struct page *node_page)
{
    struct f2fs_inode *ri;

    f2fs_wait_on_page_writeback(node_page, NODE);

    ri = F2FS_INODE(node_page);

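    /* Copy the in-memory inode into its on-disk layout, converting to little-endian */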
    ri->i_mode = cpu_to_le16(inode->i_mode);
    ri->i_advise = F2FS_I(inode)->i_advise;
    ri->i_uid = cpu_to_le32(i_uid_read(inode));
    ri->i_gid = cpu_to_le32(i_gid_read(inode));
    ri->i_links = cpu_to_le32(inode->i_nlink);
    ri->i_size = cpu_to_le64(i_size_read(inode));
    ri->i_blocks = cpu_to_le64(inode->i_blocks);

    if (F2FS_I(inode)->extent_tree)
        set_raw_extent(&F2FS_I(inode)->extent_tree->largest,
                       &ri->i_ext);
    else
        memset(&ri->i_ext, 0, sizeof(ri->i_ext));
    set_raw_inline(F2FS_I(inode), ri);

    ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
    ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
    ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
    ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
    ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
    ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
    ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
    ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
    ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
    ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
    ri->i_generation = cpu_to_le32(inode->i_generation);
    ri->i_dir_level = F2FS_I(inode)->i_dir_level;

    __set_inode_rdev(inode, ri);
    set_cold_node(inode, node_page);
    set_page_dirty(node_page);

    clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
}
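
All of the cpu_to_leXX() calls above store fields in the on-disk little-endian
byte order: they are no-ops on little-endian CPUs and byte swaps on big-endian
ones. A minimal user-space sketch of the idea (my_cpu_to_le32 is a hypothetical
stand-in, not the kernel's implementation):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's cpu_to_le32() */
static uint32_t my_cpu_to_le32(uint32_t v)
{
	const union { uint16_t u16; uint8_t u8[2]; } probe = { .u16 = 1 };

	if (probe.u8[0] == 1)		/* little-endian host: no-op */
		return v;
	return __builtin_bswap32(v);	/* big-endian host: byte swap */
}

int main(void)
{
	printf("0x%08x\n", my_cpu_to_le32(0x12345678));
	return 0;
}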
Example #2
/*
 * It only removes the dentry from the dentry page; the corresponding name
 * entry in the name page does not need to be touched during deletion.
 */
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
                       struct inode *dir, struct inode *inode)
{
    struct f2fs_dentry_block *dentry_blk;
    unsigned int bit_pos;
    int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
    int i;

    if (f2fs_has_inline_dentry(dir))
        return f2fs_delete_inline_entry(dentry, page, dir, inode);

    lock_page(page);
    f2fs_wait_on_page_writeback(page, DATA);

    dentry_blk = page_address(page);
    bit_pos = dentry - dentry_blk->dentry;
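    /* Clear every bitmap slot occupied by this (possibly multi-slot) name */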
    for (i = 0; i < slots; i++)
        test_and_clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);

    /* Check whether this dentry page is now empty and can be deallocated */
    bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
                               NR_DENTRY_IN_BLOCK,
                               0);
    kunmap(page); /* kunmap - pair of f2fs_find_entry */
    set_page_dirty(page);

    dir->i_ctime = dir->i_mtime = CURRENT_TIME;

    if (inode)
        f2fs_drop_nlink(dir, inode, NULL);

    if (bit_pos == NR_DENTRY_IN_BLOCK) {
        truncate_hole(dir, page->index, page->index + 1);
        clear_page_dirty_for_io(page);
        ClearPagePrivate(page);
        ClearPageUptodate(page);
        inode_dec_dirty_pages(dir);
    }
    f2fs_put_page(page, 1);
}
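
GET_DENTRY_SLOTS() used above converts a name length into the number of 8-byte
dentry slots it occupies. A self-contained sketch of the same rounding
arithmetic (SLOT_LEN mirrors the kernel's F2FS_SLOT_LEN; this is an
illustration, not the kernel macro):

#include <stdio.h>

#define SLOT_LEN 8	/* bytes of filename stored per dentry slot */
#define DENTRY_SLOTS(len) (((len) + SLOT_LEN - 1) / SLOT_LEN)

int main(void)
{
	printf("%d\n", DENTRY_SLOTS(1));	/* 1 slot  */
	printf("%d\n", DENTRY_SLOTS(8));	/* 1 slot  */
	printf("%d\n", DENTRY_SLOTS(9));	/* 2 slots */
	return 0;
}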
Example #3
static void fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return;

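	/* Make sure enough free segments exist before allocating a new data page */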
	f2fs_balance_fs(sbi);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (!IS_ERR(page)) {
		f2fs_wait_on_page_writeback(page, DATA);
		zero_user(page, start, len);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}
Example #4
static int truncate_partial_data_page(struct inode *inode, u64 from)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	if (!offset)
		return 0;

	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
	if (IS_ERR(page))
		return 0;

	lock_page(page);
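	/* Re-check under the page lock: the page may have been truncated meanwhile */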
	if (unlikely(!PageUptodate(page) ||
			page->mapping != inode->i_mapping))
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	set_page_dirty(page);
out:
	f2fs_put_page(page, 1);
	return 0;
}
Example #5
File: inline.c Project: 7799/linux
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
	int err;
	struct page *ipage;
	struct dnode_of_data dn;
	void *src_addr, *dst_addr;
	block_t new_blk_addr;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

	f2fs_lock_op(sbi);
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		f2fs_unlock_op(sbi);	/* don't return with the op lock held */
		return PTR_ERR(ipage);
	}

	/*
	 * i_addr[0] is not used for inline data,
	 * so reserving a new block will not destroy the inline data
	 */
	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err) {
		f2fs_unlock_op(sbi);
		return err;
	}

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	SetPageUptodate(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	write_data_page(page, &dn, &new_blk_addr, &fio);
	update_extent_cache(new_blk_addr, &dn);
	f2fs_wait_on_page_writeback(page, DATA);

	/* clear inline data and flag after data writeback */
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
	stat_dec_inline_inode(inode);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
	return err;
}

int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
{
	struct page *page;
	int err;

	if (!f2fs_has_inline_data(inode))
		return 0;
	else if (to_size <= MAX_INLINE_DATA)
		return 0;

	page = grab_cache_page_write_begin(inode->i_mapping, 0, AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	err = __f2fs_convert_inline_data(inode, page);
	f2fs_put_page(page, 1);
	return err;
}

int f2fs_write_inline_data(struct inode *inode,
			   struct page *page, unsigned size)
{
	void *src_addr, *dst_addr;
	struct page *ipage;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;
	ipage = dn.inode_page;

	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	src_addr = kmap(page);
	dst_addr = inline_data_addr(ipage);
	memcpy(dst_addr, src_addr, size);
	kunmap(page);

	/* Release the first data block if it is allocated */
	if (!f2fs_has_inline_data(inode)) {
		truncate_data_blocks_range(&dn, 1);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		stat_inc_inline_inode(inode);
	}

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	return 0;
}
Example #6
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
                          struct inode *inode, nid_t ino, umode_t mode)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
    struct page *ipage;
    unsigned int bit_pos;
    f2fs_hash_t name_hash;
    size_t namelen = name->len;
    struct f2fs_inline_dentry *dentry_blk = NULL;
    struct f2fs_dentry_ptr d;
    int slots = GET_DENTRY_SLOTS(namelen);
    struct page *page = NULL;
    int err = 0;

    ipage = get_node_page(sbi, dir->i_ino);
    if (IS_ERR(ipage))
        return PTR_ERR(ipage);

    dentry_blk = inline_data_addr(ipage);
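    /* Look for a run of consecutive free bitmap slots big enough for this name */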
    bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
                                slots, NR_INLINE_DENTRY);
    if (bit_pos >= NR_INLINE_DENTRY) {
        err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
        if (!err)
            err = -EAGAIN;
        goto out;
    }

    if (inode) {
        down_write(&F2FS_I(inode)->i_sem);
        page = init_inode_metadata(inode, dir, name, ipage);
        if (IS_ERR(page)) {
            err = PTR_ERR(page);
            goto fail;
        }
    }

    f2fs_wait_on_page_writeback(ipage, NODE);

    name_hash = f2fs_dentry_hash(name);
    make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
    f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);

    set_page_dirty(ipage);

    /* we don't need to mark_inode_dirty now */
    if (inode) {
        F2FS_I(inode)->i_pino = dir->i_ino;
        update_inode(inode, page);
        f2fs_put_page(page, 1);
    }

    update_parent_metadata(dir, inode, 0);
fail:
    if (inode)
        up_write(&F2FS_I(inode)->i_sem);

    if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
        update_inode(dir, ipage);
        clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
    }
out:
    f2fs_put_page(ipage, 1);
    return err;
}
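
room_for_filename() above scans the dentry bitmap for `slots` consecutive free
bits and returns the start of the run, or the bitmap size when the block is
full. A self-contained first-fit sketch of that scan (a hypothetical helper,
not the kernel implementation):

#include <stdio.h>

/* Return the first index of `slots` consecutive zero bits, or nbits if none */
static int find_free_slots(const unsigned char *bitmap, int nbits, int slots)
{
	int run = 0;

	for (int i = 0; i < nbits; i++) {
		if (bitmap[i / 8] & (1u << (i % 8)))
			run = 0;		/* slot in use: restart the run */
		else if (++run == slots)
			return i - slots + 1;	/* start of the free run */
	}
	return nbits;	/* no room: callers test bit_pos >= nbits */
}

int main(void)
{
	unsigned char bitmap[2] = { 0x0b, 0x00 };	/* slots 0, 1 and 3 in use */
	printf("%d\n", find_free_slots(bitmap, 16, 2));	/* prints 4 */
	return 0;
}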
Example #7
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
    void *src_addr, *dst_addr;
    struct f2fs_io_info fio = {
        .sbi = F2FS_I_SB(dn->inode),
        .type = DATA,
        .rw = WRITE_SYNC | REQ_PRIO,
        .page = page,
        .encrypted_page = NULL,
    };
    int dirty, err;

    f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

    if (!f2fs_exist_data(dn->inode))
        goto clear_out;

    err = f2fs_reserve_block(dn, 0);
    if (err)
        return err;

    f2fs_wait_on_page_writeback(page, DATA);

    if (PageUptodate(page))
        goto no_update;

    zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

    /* Copy the whole inline data block */
    src_addr = inline_data_addr(dn->inode_page);
    dst_addr = kmap_atomic(page);
    memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
    flush_dcache_page(page);
    kunmap_atomic(dst_addr);
    SetPageUptodate(page);
no_update:
    set_page_dirty(page);

    /* clear dirty state */
    dirty = clear_page_dirty_for_io(page);

    /* write data page to try to make data consistent */
    set_page_writeback(page);
    fio.blk_addr = dn->data_blkaddr;
    write_data_page(dn, &fio);
    set_data_blkaddr(dn);
    f2fs_update_extent_cache(dn);
    f2fs_wait_on_page_writeback(page, DATA);
    if (dirty)
        inode_dec_dirty_pages(dn->inode);

    /* this converted inline_data should be recovered. */
    set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

    /* clear inline data and flag after data writeback */
    truncate_inline_inode(dn->inode_page, 0);
clear_out:
    stat_dec_inline_inode(dn->inode);
    f2fs_clear_inline_inode(dn->inode);
    sync_inode_page(dn);
    f2fs_put_dnode(dn);
    return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct dnode_of_data dn;
    struct page *ipage, *page;
    int err = 0;

    page = grab_cache_page(inode->i_mapping, 0);
    if (!page)
        return -ENOMEM;

    f2fs_lock_op(sbi);

    ipage = get_node_page(sbi, inode->i_ino);
    if (IS_ERR(ipage)) {
        err = PTR_ERR(ipage);
        goto out;
    }

    set_new_dnode(&dn, inode, ipage, ipage, 0);

    if (f2fs_has_inline_data(inode))
        err = f2fs_convert_inline_page(&dn, page);

    f2fs_put_dnode(&dn);
out:
    f2fs_unlock_op(sbi);

    f2fs_put_page(page, 1);
    return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
    void *src_addr, *dst_addr;
    struct dnode_of_data dn;
    int err;

    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
    if (err)
        return err;

    if (!f2fs_has_inline_data(inode)) {
        f2fs_put_dnode(&dn);
        return -EAGAIN;
    }

    f2fs_bug_on(F2FS_I_SB(inode), page->index);

    f2fs_wait_on_page_writeback(dn.inode_page, NODE);
    src_addr = kmap_atomic(page);
    dst_addr = inline_data_addr(dn.inode_page);
    memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
    kunmap_atomic(src_addr);

    set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
    set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

    sync_inode_page(&dn);
    f2fs_put_dnode(&dn);
    return 0;
}
Example #8
File: recovery.c Project: avagin/linux
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		f2fs_recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	if (f2fs_recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			inode->i_ino, ofs_of_node(dn.node_page),
			ofs_of_node(page));
		err = -EFAULT;
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

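		/* src: address currently in the dnode; dest: address in the fsync'd node page */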
		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(src) &&
			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
			err = -EFAULT;
			goto err;
		}

		if (__is_valid_data_blkaddr(dest) &&
			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			err = -EFAULT;
			goto err;
		}

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		inode->i_ino,
		file_keep_isize(inode) ? "keep" : "recover",
		recovered, err);
	return err;
}
Example #9
File: data.c Project: aejsmith/linux
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that in f2fs, block_size equals page_size by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
                                struct list_head *pages, struct page *page,
                                unsigned nr_pages)
{
    struct bio *bio = NULL;
    unsigned page_idx;
    sector_t last_block_in_bio = 0;
    struct inode *inode = mapping->host;
    const unsigned blkbits = inode->i_blkbits;
    const unsigned blocksize = 1 << blkbits;
    sector_t block_in_file;
    sector_t last_block;
    sector_t last_block_in_file;
    sector_t block_nr;
    struct block_device *bdev = inode->i_sb->s_bdev;
    struct f2fs_map_blocks map;

    map.m_pblk = 0;
    map.m_lblk = 0;
    map.m_len = 0;
    map.m_flags = 0;

    for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

        if (pages) {
            page = list_entry(pages->prev, struct page, lru);
            list_del(&page->lru);
            if (add_to_page_cache_lru(page, mapping,
                                      page->index, GFP_KERNEL))
                goto next_page;
        }
        /* prefetch only once the page pointer is valid for this iteration */
        prefetchw(&page->flags);

        block_in_file = (sector_t)page->index;
        last_block = block_in_file + nr_pages;
        last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
                             blkbits;
        if (last_block > last_block_in_file)
            last_block = last_block_in_file;

        /*
         * Map blocks using the previous result first.
         */
        if ((map.m_flags & F2FS_MAP_MAPPED) &&
                block_in_file > map.m_lblk &&
                block_in_file < (map.m_lblk + map.m_len))
            goto got_it;

        /*
         * Then do more f2fs_map_blocks() calls until we are
         * done with this page.
         */
        map.m_flags = 0;

        if (block_in_file < last_block) {
            map.m_lblk = block_in_file;
            map.m_len = last_block - block_in_file;

            if (f2fs_map_blocks(inode, &map, 0, false))
                goto set_error_page;
        }
got_it:
        if ((map.m_flags & F2FS_MAP_MAPPED)) {
            block_nr = map.m_pblk + block_in_file - map.m_lblk;
            SetPageMappedToDisk(page);

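            /* cleancache_get_page() returns 0 on a hit, so the page is already filled */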
            if (!PageUptodate(page) && !cleancache_get_page(page)) {
                SetPageUptodate(page);
                goto confused;
            }
        } else {
            zero_user_segment(page, 0, PAGE_CACHE_SIZE);
            SetPageUptodate(page);
            unlock_page(page);
            goto next_page;
        }

        /*
         * This page will go to BIO.  Do we need to send this
         * BIO off first?
         */
        if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
            submit_bio(READ, bio);
            bio = NULL;
        }
        if (bio == NULL) {
            struct f2fs_crypto_ctx *ctx = NULL;

            if (f2fs_encrypted_inode(inode) &&
                    S_ISREG(inode->i_mode)) {
                struct page *cpage;

                ctx = f2fs_get_crypto_ctx(inode);
                if (IS_ERR(ctx))
                    goto set_error_page;

                /* wait for the page to be moved by cleaning (GC) */
                cpage = find_lock_page(
                            META_MAPPING(F2FS_I_SB(inode)),
                            block_nr);
                if (cpage) {
                    f2fs_wait_on_page_writeback(cpage,
                                                DATA);
                    f2fs_put_page(cpage, 1);
                }
            }

            bio = bio_alloc(GFP_KERNEL,
                            min_t(int, nr_pages, BIO_MAX_PAGES));
            if (!bio) {
                if (ctx)
                    f2fs_release_crypto_ctx(ctx);
                goto set_error_page;
            }
            bio->bi_bdev = bdev;
            bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
            bio->bi_end_io = f2fs_read_end_io;
            bio->bi_private = ctx;
        }

        if (bio_add_page(bio, page, blocksize, 0) < blocksize)
            goto submit_and_realloc;

        last_block_in_bio = block_nr;
        goto next_page;
set_error_page:
        SetPageError(page);
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        unlock_page(page);
        goto next_page;
confused:
        if (bio) {
            submit_bio(READ, bio);
            bio = NULL;
        }
        unlock_page(page);
next_page:
        if (pages)
            page_cache_release(page);
    }
    BUG_ON(pages && !list_empty(pages));
    if (bio)
        submit_bio(READ, bio);
    return 0;
}
Example #10
File: data.c Project: aejsmith/linux
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
    struct inode *inode = mapping->host;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct page *page = NULL;
    struct page *ipage;
    pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
    struct dnode_of_data dn;
    int err = 0;

    trace_f2fs_write_begin(inode, pos, len, flags);

    f2fs_balance_fs(sbi);

    /*
     * We should check this at this moment to avoid deadlock on inode page
     * and #0 page. The locking rule for inline_data conversion should be:
     * lock_page(page #0) -> lock_page(inode_page)
     */
    if (index != 0) {
        err = f2fs_convert_inline_inode(inode);
        if (err)
            goto fail;
    }
repeat:
    page = grab_cache_page_write_begin(mapping, index, flags);
    if (!page) {
        err = -ENOMEM;
        goto fail;
    }

    *pagep = page;

    f2fs_lock_op(sbi);

    /* check inline_data */
    ipage = get_node_page(sbi, inode->i_ino);
    if (IS_ERR(ipage)) {
        err = PTR_ERR(ipage);
        goto unlock_fail;
    }

    set_new_dnode(&dn, inode, ipage, ipage, 0);

    if (f2fs_has_inline_data(inode)) {
        if (pos + len <= MAX_INLINE_DATA) {
            read_inline_data(page, ipage);
            set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
            sync_inode_page(&dn);
            goto put_next;
        }
        err = f2fs_convert_inline_page(&dn, page);
        if (err)
            goto put_fail;
    }

    err = f2fs_get_block(&dn, index);
    if (err)
        goto put_fail;
put_next:
    f2fs_put_dnode(&dn);
    f2fs_unlock_op(sbi);

    f2fs_wait_on_page_writeback(page, DATA);

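    /* A full-page write overwrites everything, so the old contents need not be read */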
    if (len == PAGE_CACHE_SIZE)
        goto out_update;
    if (PageUptodate(page))
        goto out_clear;

    if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
        unsigned start = pos & (PAGE_CACHE_SIZE - 1);
        unsigned end = start + len;

        /* Reading beyond i_size is simple: memset to zero */
        zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
        goto out_update;
    }

    if (dn.data_blkaddr == NEW_ADDR) {
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
    } else {
        struct f2fs_io_info fio = {
            .sbi = sbi,
            .type = DATA,
            .rw = READ_SYNC,
            .blk_addr = dn.data_blkaddr,
            .page = page,
            .encrypted_page = NULL,
        };
        err = f2fs_submit_page_bio(&fio);
        if (err)
            goto fail;

        lock_page(page);
        if (unlikely(!PageUptodate(page))) {
            err = -EIO;
            goto fail;
        }
        if (unlikely(page->mapping != mapping)) {
            f2fs_put_page(page, 1);
            goto repeat;
        }

        /* avoid symlink page */
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
            err = f2fs_decrypt_one(inode, page);
            if (err)
                goto fail;
        }
    }
out_update:
    SetPageUptodate(page);
out_clear:
    clear_cold_data(page);
    return 0;

put_fail:
    f2fs_put_dnode(&dn);
unlock_fail:
    f2fs_unlock_op(sbi);
fail:
    f2fs_put_page(page, 1);
    f2fs_write_failed(mapping, pos + len);
    return err;
}

static int f2fs_write_end(struct file *file,
                          struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
    struct inode *inode = page->mapping->host;

    trace_f2fs_write_end(inode, pos, len, copied);

    set_page_dirty(page);

    if (pos + copied > i_size_read(inode)) {
        i_size_write(inode, pos + copied);
        mark_inode_dirty(inode);
        update_inode_page(inode);
    }

    f2fs_put_page(page, 1);
    return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
                           loff_t offset)
{
    unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

    if (offset & blocksize_mask)
        return -EINVAL;

    if (iov_iter_alignment(iter) & blocksize_mask)
        return -EINVAL;

    return 0;
}
Example #11
File: data.c Project: aejsmith/linux
static int f2fs_write_data_page(struct page *page,
                                struct writeback_control *wbc)
{
    struct inode *inode = page->mapping->host;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    loff_t i_size = i_size_read(inode);
    const pgoff_t end_index = ((unsigned long long) i_size)
                              >> PAGE_CACHE_SHIFT;
    unsigned offset = 0;
    bool need_balance_fs = false;
    int err = 0;
    struct f2fs_io_info fio = {
        .sbi = sbi,
        .type = DATA,
        .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
        .page = page,
        .encrypted_page = NULL,
    };

    trace_f2fs_writepage(page, DATA);

    if (page->index < end_index)
        goto write;

    /*
     * If the offset is out-of-range of file size,
     * this page does not have to be written to disk.
     */
    offset = i_size & (PAGE_CACHE_SIZE - 1);
    if ((page->index >= end_index + 1) || !offset)
        goto out;

    zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
    if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
        goto redirty_out;
    if (f2fs_is_drop_cache(inode))
        goto out;
    if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
            available_free_memory(sbi, BASE_CHECK))
        goto redirty_out;

    /* Dentry blocks are controlled by checkpoint */
    if (S_ISDIR(inode->i_mode)) {
        if (unlikely(f2fs_cp_error(sbi)))
            goto redirty_out;
        err = do_write_data_page(&fio);
        goto done;
    }

    /* we should bypass data pages to let the kworker jobs proceed */
    if (unlikely(f2fs_cp_error(sbi))) {
        SetPageError(page);
        goto out;
    }

    if (!wbc->for_reclaim)
        need_balance_fs = true;
    else if (has_not_enough_free_secs(sbi, 0))
        goto redirty_out;

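    /* Try the inline-data path first; -EAGAIN falls back to a regular data-page write */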
    err = -EAGAIN;
    f2fs_lock_op(sbi);
    if (f2fs_has_inline_data(inode))
        err = f2fs_write_inline_data(inode, page);
    if (err == -EAGAIN)
        err = do_write_data_page(&fio);
    f2fs_unlock_op(sbi);
done:
    if (err && err != -ENOENT)
        goto redirty_out;

    clear_cold_data(page);
out:
    inode_dec_dirty_pages(inode);
    if (err)
        ClearPageUptodate(page);
    unlock_page(page);
    if (need_balance_fs)
        f2fs_balance_fs(sbi);
    if (wbc->for_reclaim)
        f2fs_submit_merged_bio(sbi, DATA, WRITE);
    return 0;

redirty_out:
    redirty_page_for_writepage(wbc, page);
    return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
                            void *data)
{
    struct address_space *mapping = data;
    int ret = mapping->a_ops->writepage(page, wbc);
    mapping_set_error(mapping, ret);
    return ret;
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is that cold data pages are written in a separate step
 * from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
                                  struct writeback_control *wbc, writepage_t writepage,
                                  void *data)
{
    int ret = 0;
    int done = 0;
    struct pagevec pvec;
    int nr_pages;
    pgoff_t uninitialized_var(writeback_index);
    pgoff_t index;
    pgoff_t end;		/* Inclusive */
    pgoff_t done_index;
    int cycled;
    int range_whole = 0;
    int tag;
    int step = 0;

    pagevec_init(&pvec, 0);
next:
    if (wbc->range_cyclic) {
        writeback_index = mapping->writeback_index; /* prev offset */
        index = writeback_index;
        if (index == 0)
            cycled = 1;
        else
            cycled = 0;
        end = -1;
    } else {
        index = wbc->range_start >> PAGE_CACHE_SHIFT;
        end = wbc->range_end >> PAGE_CACHE_SHIFT;
        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
            range_whole = 1;
        cycled = 1; /* ignore range_cyclic tests */
    }
    if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
        tag = PAGECACHE_TAG_TOWRITE;
    else
        tag = PAGECACHE_TAG_DIRTY;
retry:
    if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
        tag_pages_for_writeback(mapping, index, end);
    done_index = index;
    while (!done && (index <= end)) {
        int i;

        nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
                                      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
        if (nr_pages == 0)
            break;

        for (i = 0; i < nr_pages; i++) {
            struct page *page = pvec.pages[i];

            if (page->index > end) {
                done = 1;
                break;
            }

            done_index = page->index;

            lock_page(page);

            if (unlikely(page->mapping != mapping)) {
continue_unlock:
                unlock_page(page);
                continue;
            }

            if (!PageDirty(page)) {
                /* someone wrote it for us */
                goto continue_unlock;
            }

            if (step == is_cold_data(page))
                goto continue_unlock;

            if (PageWriteback(page)) {
                if (wbc->sync_mode != WB_SYNC_NONE)
                    f2fs_wait_on_page_writeback(page, DATA);
                else
                    goto continue_unlock;
            }

            BUG_ON(PageWriteback(page));
            if (!clear_page_dirty_for_io(page))
                goto continue_unlock;

            ret = (*writepage)(page, wbc, data);
            if (unlikely(ret)) {
                if (ret == AOP_WRITEPAGE_ACTIVATE) {
                    unlock_page(page);
                    ret = 0;
                } else {
                    done_index = page->index + 1;
                    done = 1;
                    break;
                }
            }

            if (--wbc->nr_to_write <= 0 &&
                    wbc->sync_mode == WB_SYNC_NONE) {
                done = 1;
                break;
            }
        }
        pagevec_release(&pvec);
        cond_resched();
    }

    if (step < 1) {
        step++;
        goto next;
    }

    if (!cycled && !done) {
        cycled = 1;
        index = 0;
        end = writeback_index - 1;
        goto retry;
    }
    if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
        mapping->writeback_index = done_index;

    return ret;
}
Example #12
static void truncate_inline_data(struct page *ipage)
{
	f2fs_wait_on_page_writeback(ipage, NODE);
	memset(inline_data_addr(ipage), 0, MAX_INLINE_DATA);
}
Example #13
File: inode.c Project: Lyude/linux
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_wait_on_page_writeback(node_page, NODE, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
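	/* on-disk i_blocks is in filesystem blocks and includes the inode block itself */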
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)->sb))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)->sb) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_inline_node(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}
Example #14
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int __f2fs_add_link(struct inode *dir, const struct qstr *name,
						struct inode *inode)
{
	unsigned int bit_pos;
	unsigned int level;
	unsigned int current_depth;
	unsigned long bidx, block;
	f2fs_hash_t dentry_hash;
	struct f2fs_dir_entry *de;
	unsigned int nbucket, nblock;
	size_t namelen = name->len;
	struct page *dentry_page = NULL;
	struct f2fs_dentry_block *dentry_blk = NULL;
	int slots = GET_DENTRY_SLOTS(namelen);
	struct page *page;
	int err = 0;
	int i;

	dentry_hash = f2fs_dentry_hash(name);
	level = 0;
	current_depth = F2FS_I(dir)->i_current_depth;
	if (F2FS_I(dir)->chash == dentry_hash) {
		level = F2FS_I(dir)->clevel;
		F2FS_I(dir)->chash = 0;
	}

start:
	if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
		return -ENOSPC;

	/* Increase the depth, if required */
	if (level == current_depth)
		++current_depth;

	nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
	nblock = bucket_blocks(level);

	bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
				(le32_to_cpu(dentry_hash) % nbucket));

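	/* Probe each block of the chosen hash bucket for enough free dentry slots */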
	for (block = bidx; block <= (bidx + nblock - 1); block++) {
		dentry_page = get_new_data_page(dir, NULL, block, true);
		if (IS_ERR(dentry_page))
			return PTR_ERR(dentry_page);

		dentry_blk = kmap(dentry_page);
		bit_pos = room_for_filename(dentry_blk, slots);
		if (bit_pos < NR_DENTRY_IN_BLOCK)
			goto add_dentry;

		kunmap(dentry_page);
		f2fs_put_page(dentry_page, 1);
	}

	/* Move to next level to find the empty slot for new dentry */
	++level;
	goto start;
add_dentry:
	f2fs_wait_on_page_writeback(dentry_page, DATA);

	down_write(&F2FS_I(inode)->i_sem);
	page = init_inode_metadata(inode, dir, name);
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto fail;
	}
	de = &dentry_blk->dentry[bit_pos];
	de->hash_code = dentry_hash;
	de->name_len = cpu_to_le16(namelen);
	memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
	de->ino = cpu_to_le32(inode->i_ino);
	set_de_type(de, inode);
	for (i = 0; i < slots; i++)
		test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
	set_page_dirty(dentry_page);

	/* we don't need to mark_inode_dirty now */
	F2FS_I(inode)->i_pino = dir->i_ino;
	update_inode(inode, page);
	f2fs_put_page(page, 1);

	update_parent_metadata(dir, inode, current_depth);
fail:
	up_write(&F2FS_I(inode)->i_sem);

	if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
		update_inode_page(dir);
		clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}
	kunmap(dentry_page);
	f2fs_put_page(dentry_page, 1);
	return err;
}
Example #15
static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
				void *txattr_addr, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	size_t inline_size = inline_xattr_size(inode);
	struct page *in_page = NULL;
	void *xattr_addr;
	void *inline_addr = NULL;
	struct page *xpage;
	nid_t new_nid = 0;
	int err = 0;

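	/* Pre-allocate a node ID in case a separate xattr node block must be created */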
	if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid)
		if (!alloc_nid(sbi, &new_nid))
			return -ENOSPC;

	/* write to inline xattr */
	if (inline_size) {
		if (ipage) {
			inline_addr = inline_xattr_addr(inode, ipage);
		} else {
			in_page = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(in_page)) {
				alloc_nid_failed(sbi, new_nid);
				return PTR_ERR(in_page);
			}
			inline_addr = inline_xattr_addr(inode, in_page);
		}

		f2fs_wait_on_page_writeback(ipage ? ipage : in_page,
							NODE, true);
		/* no need to use xattr node block */
		if (hsize <= inline_size) {
			err = truncate_xattr_node(inode);
			alloc_nid_failed(sbi, new_nid);
			if (err) {
				f2fs_put_page(in_page, 1);
				return err;
			}
			memcpy(inline_addr, txattr_addr, inline_size);
			set_page_dirty(ipage ? ipage : in_page);
			goto in_page_out;
		}
	}

	/* write to xattr node block */
	if (F2FS_I(inode)->i_xattr_nid) {
		xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
		if (IS_ERR(xpage)) {
			err = PTR_ERR(xpage);
			alloc_nid_failed(sbi, new_nid);
			goto in_page_out;
		}
		f2fs_bug_on(sbi, new_nid);
		f2fs_wait_on_page_writeback(xpage, NODE, true);
	} else {
		struct dnode_of_data dn;
		set_new_dnode(&dn, inode, NULL, NULL, new_nid);
		xpage = new_node_page(&dn, XATTR_NODE_OFFSET);
		if (IS_ERR(xpage)) {
			err = PTR_ERR(xpage);
			alloc_nid_failed(sbi, new_nid);
			goto in_page_out;
		}
		alloc_nid_done(sbi, new_nid);
	}
	xattr_addr = page_address(xpage);

	if (inline_size)
		memcpy(inline_addr, txattr_addr, inline_size);
	memcpy(xattr_addr, txattr_addr + inline_size, VALID_XATTR_BLOCK_SIZE);

	if (inline_size)
		set_page_dirty(ipage ? ipage : in_page);
	set_page_dirty(xpage);

	f2fs_put_page(xpage, 1);
in_page_out:
	f2fs_put_page(in_page, 1);
	return err;
}
Example #16
File: inline.c Project: mdamt/linux
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
				const struct qstr *orig_name,
				struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	f2fs_hash_t name_hash;
	struct f2fs_inline_dentry *inline_dentry = NULL;
	struct f2fs_dentry_ptr d;
	int slots = GET_DENTRY_SLOTS(new_name->len);
	struct page *page = NULL;
	int err = 0;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(ipage);
	bit_pos = room_for_filename(&inline_dentry->dentry_bitmap,
						slots, NR_INLINE_DENTRY);
	if (bit_pos >= NR_INLINE_DENTRY) {
		err = f2fs_convert_inline_dir(dir, ipage, inline_dentry);
		if (err)
			return err;
		err = -EAGAIN;
		goto out;
	}

	if (inode) {
		down_write(&F2FS_I(inode)->i_sem);
		page = init_inode_metadata(inode, dir, new_name,
						orig_name, ipage);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	f2fs_wait_on_page_writeback(ipage, NODE, true);

	name_hash = f2fs_dentry_hash(new_name, NULL);
	make_dentry_ptr_inline(NULL, &d, inline_dentry);
	f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);

	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	if (inode) {
		f2fs_i_pino_write(inode, dir->i_ino);
		f2fs_put_page(page, 1);
	}

	update_parent_metadata(dir, inode, 0);
fail:
	if (inode)
		up_write(&F2FS_I(inode)->i_sem);
out:
	f2fs_put_page(ipage, 1);
	return err;
}
Example #17
File: inline.c Project: mdamt/linux
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
	};
	int dirty, err;

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));

	read_inline_data(page, dn->inode_page);
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.old_blkaddr = dn->data_blkaddr;
	set_inode_flag(dn->inode, FI_HOT_DATA);
	write_data_page(dn, &fio);
	f2fs_wait_on_page_writeback(page, DATA, true);
	if (dirty) {
		inode_dec_dirty_pages(dn->inode);
		remove_dirty_inode(dn->inode);
	}

	/* this converted inline_data should be recovered. */
	set_inode_flag(dn->inode, FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode, dn->inode_page, 0);
	clear_inline_node(dn->inode_page);
clear_out:
	stat_dec_inline_inode(dn->inode);
	clear_inode_flag(dn->inode, FI_INLINE_DATA);
	f2fs_put_dnode(dn);
	return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	if (!f2fs_has_inline_data(inode))
		return 0;

	page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);

	f2fs_balance_fs(sbi, dn.node_changed);

	return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);
	set_page_dirty(dn.inode_page);

	set_inode_flag(inode, FI_APPEND_WRITE);
	set_inode_flag(inode, FI_DATA_EXIST);

	clear_inline_node(dn.inode_page);
	f2fs_put_dnode(&dn);
	return 0;
}
Example #18
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	f2fs_balance_fs(sbi);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	file_update_time(vma->vm_file);
	lock_page(page);
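	/* Re-validate after locking the page: a racing truncate may have invalidated it */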
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}
Example #19
File: data.c Project: anrqkdrnl/detonator
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	struct page *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}

	err = f2fs_get_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_wait_on_page_writeback(page, DATA);

	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out_update;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt_one(inode, page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static ssize_t check_direct_IO(struct inode *inode, int rw,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
	int seg, i;
	size_t size;
	unsigned long addr;
	ssize_t retval = -EINVAL;
	loff_t end = offset;

	if (offset & blocksize_mask)
		return -EINVAL;

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if ((addr & blocksize_mask) || (size & blocksize_mask))
			goto out;

		/* If this is a write we don't need to check anymore */
		if (rw & WRITE)
			continue;

		/*
		 * Check to make sure we don't have duplicate iov_base's in this
		 * iovec, if so return EINVAL, otherwise we'll get csum errors
		 * when reading back.
		 */
		for (i = seg + 1; i < nr_segs; i++) {
			if (iov[seg].iov_base == iov[i].iov_base)
				goto out;
		}
	}
	retval = 0;
out:
	return retval;
}
Example #20
static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
				void *txattr_addr, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	size_t inline_size = 0;
	void *xattr_addr;
	struct page *xpage;
	nid_t new_nid = 0;
	int err;

	inline_size = inline_xattr_size(inode);

	if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid)
		if (!alloc_nid(sbi, &new_nid))
			return -ENOSPC;

	/* write to inline xattr */
	if (inline_size) {
		struct page *page = NULL;
		void *inline_addr;

		if (ipage) {
			inline_addr = inline_xattr_addr(ipage);
			f2fs_wait_on_page_writeback(ipage, NODE, true);
			set_page_dirty(ipage);
		} else {
			page = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(page)) {
				alloc_nid_failed(sbi, new_nid);
				return PTR_ERR(page);
			}
			inline_addr = inline_xattr_addr(page);
			f2fs_wait_on_page_writeback(page, NODE, true);
		}
		memcpy(inline_addr, txattr_addr, inline_size);
		f2fs_put_page(page, 1);

		/* no need to use xattr node block */
		if (hsize <= inline_size) {
			err = truncate_xattr_node(inode, ipage);
			alloc_nid_failed(sbi, new_nid);
			return err;
		}
	}

	/* write to xattr node block */
	if (F2FS_I(inode)->i_xattr_nid) {
		xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
		if (IS_ERR(xpage)) {
			alloc_nid_failed(sbi, new_nid);
			return PTR_ERR(xpage);
		}
		f2fs_bug_on(sbi, new_nid);
		f2fs_wait_on_page_writeback(xpage, NODE, true);
	} else {
		struct dnode_of_data dn;
		set_new_dnode(&dn, inode, NULL, NULL, new_nid);
		xpage = new_node_page(&dn, XATTR_NODE_OFFSET, ipage);
		if (IS_ERR(xpage)) {
			alloc_nid_failed(sbi, new_nid);
			return PTR_ERR(xpage);
		}
		alloc_nid_done(sbi, new_nid);
	}

	xattr_addr = page_address(xpage);
	memcpy(xattr_addr, txattr_addr + inline_size, PAGE_SIZE -
						sizeof(struct node_footer));
	set_page_dirty(xpage);
	f2fs_put_page(xpage, 1);

	/* need to checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
	return 0;
}
Example #21
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
						struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	f2fs_hash_t name_hash;
	struct f2fs_dir_entry *de;
	size_t namelen = name->len;
	struct f2fs_inline_dentry *dentry_blk = NULL;
	int slots = GET_DENTRY_SLOTS(namelen);
	struct page *page;
	int err = 0;
	int i;

	name_hash = f2fs_dentry_hash(name);

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	dentry_blk = inline_data_addr(ipage);
	bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
						slots, NR_INLINE_DENTRY);
	if (bit_pos >= NR_INLINE_DENTRY) {
		err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
		if (!err)
			err = -EAGAIN;
		goto out;
	}

	down_write(&F2FS_I(inode)->i_sem);
	page = init_inode_metadata(inode, dir, name, ipage);
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto fail;
	}

	f2fs_wait_on_page_writeback(ipage, NODE);
	de = &dentry_blk->dentry[bit_pos];
	de->hash_code = name_hash;
	de->name_len = cpu_to_le16(namelen);
	memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
	de->ino = cpu_to_le32(inode->i_ino);
	set_de_type(de, inode);
	for (i = 0; i < slots; i++)
		test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	F2FS_I(inode)->i_pino = dir->i_ino;
	update_inode(inode, page);
	f2fs_put_page(page, 1);

	update_parent_metadata(dir, inode, 0);
fail:
	up_write(&F2FS_I(inode)->i_sem);

	if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
		update_inode(dir, ipage);
		clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}
out:
	f2fs_put_page(ipage, 1);
	return err;
}