Code example #1 (score: 0)
File: data.c — project: aejsmith/linux
/*
 * get_read_data_page - return the page-cache page for data block @index of
 * @inode, issuing a read bio (mode @rw) when the page is not yet uptodate.
 *
 * Returns the page on success, or an ERR_PTR() on failure.  The uptodate
 * and zero-fill paths unlock the page before returning; the bio-submit
 * path returns immediately after submission — NOTE(review): presumably the
 * read completion handler unlocks it there; confirm against the f2fs
 * end_io path before relying on lock state.
 */
struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
{
    struct address_space *mapping = inode->i_mapping;
    struct dnode_of_data dn;
    struct page *page;
    struct extent_info ei;
    int err;
    struct f2fs_io_info fio = {
        .sbi = F2FS_I_SB(inode),
        .type = DATA,
        .rw = rw,
        .encrypted_page = NULL,
    };

    /*
     * Encrypted regular files are routed through the generic mapping read
     * path — NOTE(review): presumably so ->readpage performs decryption;
     * confirm.
     */
    if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
        return read_mapping_page(mapping, index, NULL);

    page = grab_cache_page(mapping, index);
    if (!page)
        return ERR_PTR(-ENOMEM);

    /* Fast path: block address may be cached in the extent cache. */
    if (f2fs_lookup_extent_cache(inode, index, &ei)) {
        dn.data_blkaddr = ei.blk + index - ei.fofs;
        goto got_it;
    }

    /* Slow path: resolve the block address through the dnode. */
    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
    if (err)
        goto put_err;
    f2fs_put_dnode(&dn);

    /* No on-disk block recorded for this index: report -ENOENT. */
    if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
        err = -ENOENT;
        goto put_err;
    }
got_it:
    if (PageUptodate(page)) {
        unlock_page(page);
        return page;
    }

    /*
     * A new dentry page is allocated but not able to be written, since its
     * new inode page couldn't be allocated due to -ENOSPC.
     * In such the case, its blkaddr can be remained as NEW_ADDR.
     * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
     */
    if (dn.data_blkaddr == NEW_ADDR) {
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return page;
    }

    fio.blk_addr = dn.data_blkaddr;
    fio.page = page;
    err = f2fs_submit_page_bio(&fio);
    if (err)
        goto put_err;
    return page;

put_err:
    /* Release our reference and the page lock (locked == 1). */
    f2fs_put_page(page, 1);
    return ERR_PTR(err);
}
Code example #2 (score: 0)
/*
 * find_fsync_dnodes - walk the warm-node log written after the last
 * checkpoint and collect, on @head, an fsync_inode_entry for every inode
 * that has fsync'ed dnodes to recover.
 *
 * The walk starts at the next free block of CURSEG_WARM_NODE and follows
 * next_blkaddr_of_node() links, stopping when a node's checkpoint version
 * no longer matches the current one.  Returns 0 on success or a negative
 * errno.  Entries added to @head (and the inodes they reference) are
 * cleaned up by the caller.
 */
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		/*
		 * NOTE(review): this early return bypasses the common
		 * unlock/__free_pages cleanup below and appears to leak
		 * @page — unless this (old-API) f2fs_submit_page_bio
		 * releases the page itself on failure.  Confirm its error
		 * semantics; later kernels route errors through cleanup.
		 */
		err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
		if (err)
			return err;

		/* Waits for the read to complete (end_io unlocks the page). */
		lock_page(page);

		/* Node written before the last checkpoint: end of the log. */
		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err) {
					f2fs_msg(sbi->sb, KERN_INFO,
					 "%s: recover_inode_page failed: %d",
								__func__, err);
					break;
				}
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
			if (!entry) {
				err = -ENOMEM;
				break;
			}

			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				f2fs_msg(sbi->sb, KERN_INFO,
					"%s: f2fs_iget failed: %d",
					__func__, err);
				kmem_cache_free(fsync_entry_slab, entry);
				break;
			}
			list_add_tail(&entry->list, head);
		}
		/* Remember the last fsync dnode location for this inode. */
		entry->blkaddr = blkaddr;

		err = recover_inode(entry->inode, page);
		if (err && err != -ENOENT) {
			f2fs_msg(sbi->sb, KERN_INFO,
				"%s: recover_inode failed: %d",
				__func__, err);
			break;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}

	unlock_page(page);
	__free_pages(page, 0);

	return err;
}
Code example #3 (score: 0)
File: data.c — project: aejsmith/linux
/*
 * f2fs_write_begin - ->write_begin() hook: prepare a locked page for a
 * write of @len bytes at @pos.
 *
 * Grabs the page-cache page, takes the fs-op lock while resolving inline
 * data and mapping the data block, then brings the page uptodate when the
 * write does not cover the whole page.  On success *pagep holds the page
 * and 0 is returned; on failure the page is released, f2fs_write_failed()
 * truncates any over-allocation, and a negative errno is returned.
 */
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
    struct inode *inode = mapping->host;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct page *page = NULL;
    struct page *ipage;
    pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
    struct dnode_of_data dn;
    int err = 0;

    trace_f2fs_write_begin(inode, pos, len, flags);

    f2fs_balance_fs(sbi);

    /*
     * We should check this at this moment to avoid deadlock on inode page
     * and #0 page. The locking rule for inline_data conversion should be:
     * lock_page(page #0) -> lock_page(inode_page)
     */
    if (index != 0) {
        err = f2fs_convert_inline_inode(inode);
        if (err)
            goto fail;
    }
repeat:
    page = grab_cache_page_write_begin(mapping, index, flags);
    if (!page) {
        err = -ENOMEM;
        goto fail;
    }

    *pagep = page;

    /* Brackets the metadata updates below against checkpointing. */
    f2fs_lock_op(sbi);

    /* check inline_data */
    ipage = get_node_page(sbi, inode->i_ino);
    if (IS_ERR(ipage)) {
        err = PTR_ERR(ipage);
        goto unlock_fail;
    }

    set_new_dnode(&dn, inode, ipage, ipage, 0);

    if (f2fs_has_inline_data(inode)) {
        /* Small enough to stay inline: copy inline data into the page. */
        if (pos + len <= MAX_INLINE_DATA) {
            read_inline_data(page, ipage);
            set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
            sync_inode_page(&dn);
            goto put_next;
        }
        /* Write outgrows the inline area: convert to a regular block. */
        err = f2fs_convert_inline_page(&dn, page);
        if (err)
            goto put_fail;
    }

    err = f2fs_get_block(&dn, index);
    if (err)
        goto put_fail;
put_next:
    f2fs_put_dnode(&dn);
    f2fs_unlock_op(sbi);

    f2fs_wait_on_page_writeback(page, DATA);

    /* A full-page write will overwrite everything: no read needed. */
    if (len == PAGE_CACHE_SIZE)
        goto out_update;
    if (PageUptodate(page))
        goto out_clear;

    if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
        unsigned start = pos & (PAGE_CACHE_SIZE - 1);
        unsigned end = start + len;

        /* Reading beyond i_size is simple: memset to zero */
        zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
        goto out_update;
    }

    /* NEW_ADDR: block reserved but never written, so it reads as zeros. */
    if (dn.data_blkaddr == NEW_ADDR) {
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
    } else {
        struct f2fs_io_info fio = {
            .sbi = sbi,
            .type = DATA,
            .rw = READ_SYNC,
            .blk_addr = dn.data_blkaddr,
            .page = page,
            .encrypted_page = NULL,
        };
        /*
         * NOTE(review): if f2fs_submit_page_bio releases the page on
         * failure, the f2fs_put_page in the fail path below would be a
         * double release — confirm against its error semantics (mainline
         * later moved that put to the callers).
         */
        err = f2fs_submit_page_bio(&fio);
        if (err)
            goto fail;

        /* Wait for read completion (end_io unlocks the page). */
        lock_page(page);
        if (unlikely(!PageUptodate(page))) {
            err = -EIO;
            goto fail;
        }
        /* Page left the mapping while we waited for I/O: retry. */
        if (unlikely(page->mapping != mapping)) {
            f2fs_put_page(page, 1);
            goto repeat;
        }

        /* avoid symlink page */
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
            err = f2fs_decrypt_one(inode, page);
            if (err)
                goto fail;
        }
    }
out_update:
    SetPageUptodate(page);
out_clear:
    clear_cold_data(page);
    return 0;

put_fail:
    f2fs_put_dnode(&dn);
unlock_fail:
    f2fs_unlock_op(sbi);
fail:
    f2fs_put_page(page, 1);
    f2fs_write_failed(mapping, pos + len);
    return err;
}

/*
 * ->write_end() hook: the caller has copied @copied bytes into @page at
 * @pos.  Dirty the page, extend i_size if the write went past EOF, drop
 * the page reference taken in write_begin, and report the copied count.
 */
static int f2fs_write_end(struct file *file,
                          struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
    struct inode *inode = page->mapping->host;
    loff_t new_size = pos + copied;

    trace_f2fs_write_end(inode, pos, len, copied);

    /* Data is in the page now; mark it for writeback. */
    set_page_dirty(page);

    /* Push the file size forward when the write extended the file. */
    if (new_size > i_size_read(inode)) {
        i_size_write(inode, new_size);
        mark_inode_dirty(inode);
        update_inode_page(inode);
    }

    f2fs_put_page(page, 1);
    return copied;
}

/*
 * Validate a direct-I/O request: both the file offset and the user
 * buffer alignment must be multiples of the filesystem block size.
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
                           loff_t offset)
{
    unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

    if ((offset & blocksize_mask) == 0 &&
        (iov_iter_alignment(iter) & blocksize_mask) == 0)
        return 0;

    return -EINVAL;
}
Code example #4 (score: 0)
File: data.c — project: anrqkdrnl/detonator
/*
 * f2fs_write_begin - ->write_begin() hook: prepare a locked page for a
 * write of @len bytes at @pos.
 *
 * Grabs the page-cache page, takes the fs-op lock while resolving inline
 * data and mapping the data block, then brings the page uptodate when the
 * write does not cover the whole page.  On success *pagep holds the page
 * and 0 is returned; on failure the page is released, f2fs_write_failed()
 * truncates any over-allocation, and a negative errno is returned.
 */
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	struct page *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	/* Brackets the metadata updates below against checkpointing. */
	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		/* Small enough to stay inline: copy inline data to the page. */
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		/* Write outgrows the inline area: convert to a real block. */
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}

	err = f2fs_get_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_wait_on_page_writeback(page, DATA);

	/* A full-page write will overwrite everything: no read needed. */
	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out_update;
	}

	/* NEW_ADDR: block reserved but never written; reads as zeros. */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		/*
		 * NOTE(review): if f2fs_submit_page_bio releases the page on
		 * failure, the f2fs_put_page in the fail path below would be
		 * a double release — confirm its error semantics (mainline
		 * later moved that put to the callers).
		 */
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		/* Wait for read completion (end_io unlocks the page). */
		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		/* Page left the mapping while we waited for I/O: retry. */
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt_one(inode, page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

/*
 * ->write_end() hook: @copied bytes have been copied into @page at @pos.
 * Dirty the page, grow i_size if needed, release the page reference taken
 * by write_begin, and return the number of bytes copied.
 */
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	loff_t end_pos = pos + copied;

	trace_f2fs_write_end(inode, pos, len, copied);

	/* The new data is in the page; schedule it for writeback. */
	set_page_dirty(page);

	/* Extend the recorded file size when the write passed old EOF. */
	if (end_pos > i_size_read(inode)) {
		i_size_write(inode, end_pos);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

/*
 * Validate a direct-I/O request (legacy iovec API).
 *
 * The file offset, every iovec base address, and every iovec length must
 * be block-size aligned.  For reads, duplicate iov_base pointers are also
 * rejected (reading twice into the same buffer would corrupt checksum
 * verification).  Returns 0 when acceptable, -EINVAL otherwise.
 *
 * Fixes over the previous version: the dead `end` accumulator (a leftover
 * never read after being updated) is removed, and the segment counters are
 * `unsigned long` to match @nr_segs instead of `int` (avoids a
 * signed/unsigned comparison).  Behavior is unchanged.
 */
static ssize_t check_direct_IO(struct inode *inode, int rw,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
	unsigned long seg, i;

	/* The file offset itself must be block aligned. */
	if (offset & blocksize_mask)
		return -EINVAL;

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		unsigned long addr = (unsigned long)iov[seg].iov_base;
		size_t size = iov[seg].iov_len;

		if ((addr & blocksize_mask) || (size & blocksize_mask))
			return -EINVAL;

		/* If this is a write we don't need to check anymore */
		if (rw & WRITE)
			continue;

		/*
		 * Check to make sure we don't have duplicate iov_base's in this
		 * iovec, if so return EINVAL, otherwise we'll get csum errors
		 * when reading back.
		 */
		for (i = seg + 1; i < nr_segs; i++) {
			if (iov[seg].iov_base == iov[i].iov_base)
				return -EINVAL;
		}
	}

	return 0;
}