Example #1
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	f2fs_balance_fs(sbi);

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA);
	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	return block_page_mkwrite_return(err);
}
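
For context, every example here calls vfs_check_frozen() before dirtying pages. In the pre-3.6 kernels these snippets target, it was a simple wait macro in include/linux/fs.h; the following is a rough sketch of that era's definition (the s_frozen and s_wait_unfrozen fields belong to the old freeze implementation and were removed in later kernels):

/* Sketch of the old definition: sleep until the superblock's freeze level
 * drops below the requested one. */
#define vfs_check_frozen(sb, level) \
	wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))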
Example #2
static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	struct gfs2_qadata *qa;
	loff_t size;
	int ret;

	/* Wait if fs is frozen. This is racy so we check again later on
	 * and retry if the fs has been frozen after the page lock has
	 * been acquired
	 */
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = -ENOMEM;
	qa = gfs2_qadata_get(ip);
	if (qa == NULL)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_alloc_put;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_qadata_put(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
	return block_page_mkwrite_return(ret);
}
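
All of the page_mkwrite examples here convert their errno into a VM fault code via block_page_mkwrite_return(). For reference, a rough sketch of that helper as it appeared in include/linux/buffer_head.h in kernels of this era (the -EAGAIN case is not present in every version):

/* Sketch: map an errno from a ->page_mkwrite handler to a VM_FAULT_* code. */
static inline int block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err == -EAGAIN)
		return VM_FAULT_RETRY;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}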
Example #3
static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    struct page *page = vmf->page;
    struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
    struct gfs2_inode *ip = GFS2_I(inode);
    struct gfs2_sbd *sdp = GFS2_SB(inode);
    unsigned long last_index;
    u64 pos = page->index << PAGE_CACHE_SHIFT;
    unsigned int data_blocks, ind_blocks, rblocks;
    struct gfs2_holder gh;
    struct gfs2_qadata *qa;
    loff_t size;
    int ret;

    vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

    gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
    ret = gfs2_glock_nq(&gh);
    if (ret)
        goto out;

    set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
    set_bit(GIF_SW_PAGED, &ip->i_flags);

    if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
        lock_page(page);
        if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
            ret = -EAGAIN;
            unlock_page(page);
        }
        goto out_unlock;
    }

    ret = -ENOMEM;
    qa = gfs2_qadata_get(ip);
    if (qa == NULL)
        goto out_unlock;

    ret = gfs2_quota_lock_check(ip);
    if (ret)
        goto out_alloc_put;
    gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
    ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
    if (ret)
        goto out_quota_unlock;

    rblocks = RES_DINODE + ind_blocks;
    if (gfs2_is_jdata(ip))
        rblocks += data_blocks ? data_blocks : 1;
    if (ind_blocks || data_blocks) {
        rblocks += RES_STATFS + RES_QUOTA;
        rblocks += gfs2_rg_blocks(ip);
    }
    ret = gfs2_trans_begin(sdp, rblocks, 0);
    if (ret)
        goto out_trans_fail;

    lock_page(page);
    ret = -EINVAL;
    size = i_size_read(inode);
    last_index = (size - 1) >> PAGE_CACHE_SHIFT;

    if (size == 0 || (page->index > last_index))
        goto out_trans_end;

    ret = -EAGAIN;
    if (!PageUptodate(page) || page->mapping != inode->i_mapping)
        goto out_trans_end;


    ret = 0;
    if (gfs2_is_stuffed(ip))
        ret = gfs2_unstuff_dinode(ip, page);
    if (ret == 0)
        ret = gfs2_allocate_page_backing(page);

out_trans_end:
    if (ret)
        unlock_page(page);
    gfs2_trans_end(sdp);
out_trans_fail:
    gfs2_inplace_release(ip);
out_quota_unlock:
    gfs2_quota_unlock(ip);
out_alloc_put:
    gfs2_qadata_put(ip);
out_unlock:
    gfs2_glock_dq(&gh);
out:
    gfs2_holder_uninit(&gh);
    if (ret == 0) {
        set_page_dirty(page);

        if (inode->i_sb->s_frozen == SB_UNFROZEN) {
            wait_on_page_writeback(page);
        } else {
            ret = -EAGAIN;
            unlock_page(page);
        }
    }
    return block_page_mkwrite_return(ret);
}
Example #4
void rtl_vfs_check_frozen(struct super_block *sb, int level)
{
	vfs_check_frozen(sb, level);
}
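
Example #4 is only a thin wrapper that re-exports the vfs_check_frozen() macro to other code. A minimal, hypothetical call site (the rtl_log_flush() function and its super_block argument are assumptions for illustration, not part of the original module):

/* Hypothetical caller: block until the filesystem is unfrozen for writes
 * before doing its own write-out. */
static void rtl_log_flush(struct super_block *sb)
{
	rtl_vfs_check_frozen(sb, SB_FREEZE_WRITE);
	/* ... safe to issue writes once this returns ... */
}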
Example #5
static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs,
				    loff_t pos)
{
	int ret, direct_io, appending, rw_level, have_alloc_sem  = 0;
	int can_do_direct, sync = 0;
	ssize_t written = 0;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	loff_t *ppos = &iocb->ki_pos;
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode;

	mlog_entry("(0x%p, %u, '%.*s')\n", file,
		   (unsigned int)nr_segs,
		   file->f_path.dentry->d_name.len,
		   file->f_path.dentry->d_name.name);

	if (iocb->ki_left == 0)
		return 0;

	ret = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	count = ocount;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	appending = file->f_flags & O_APPEND ? 1 : 0;
	direct_io = file->f_flags & O_DIRECT ? 1 : 0;

	mutex_lock(&inode->i_mutex);

relock:
	/* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
	if (direct_io) {
		down_read(&inode->i_alloc_sem);
		have_alloc_sem = 1;
	}

	/* concurrent O_DIRECT writes are allowed */
	rw_level = !direct_io;
	ret = ocfs2_rw_lock(inode, rw_level);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_sems;
	}

	can_do_direct = direct_io;
	ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos,
					    iocb->ki_left, appending,
					    &can_do_direct);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * We can't complete the direct I/O as requested, fall back to
	 * buffered I/O.
	 */
	if (direct_io && !can_do_direct) {
		ocfs2_rw_unlock(inode, rw_level);
		up_read(&inode->i_alloc_sem);

		have_alloc_sem = 0;
		rw_level = -1;

		direct_io = 0;
		sync = 1;
		goto relock;
	}

	if (!sync && ((file->f_flags & O_SYNC) || IS_SYNC(inode)))
		sync = 1;

	/*
	 * XXX: Is it ok to execute these checks a second time?
	 */
	ret = generic_write_checks(file, ppos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out;

	/*
	 * Set pos so that sync_page_range_nolock() below understands
	 * where to start from. We might've moved it around via the
	 * calls above. The range we want to actually sync starts from
	 * *ppos here.
	 *
	 */
	pos = *ppos;

	/* communicate with ocfs2_dio_end_io */
	ocfs2_iocb_set_rw_locked(iocb, rw_level);

	if (direct_io) {
		written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
						    ppos, count, ocount);
		if (written < 0) {
			ret = written;
			goto out_dio;
		}
	} else {
		written = ocfs2_file_buffered_write(file, ppos, iov, nr_segs,
						    count, written);
		if (written < 0) {
			ret = written;
			if (ret != -EFAULT && ret != -ENOSPC)
				mlog_errno(ret);
			goto out;
		}
	}

out_dio:
	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));

	/* 
	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
	 * function pointer which is called when o_direct io completes so that
	 * it can unlock our rw lock.  (it's the clustered equivalent of
	 * i_alloc_sem; protects truncate from racing with pending ios).
	 * Unfortunately there are error cases which call end_io and others
	 * that don't.  so we don't have to unlock the rw_lock if either an
	 * async dio is going to do it in the future or an end_io after an
	 * error has already done it.
	 */
	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		rw_level = -1;
		have_alloc_sem = 0;
	}

out:
	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

out_sems:
	if (have_alloc_sem)
		up_read(&inode->i_alloc_sem);

	if (written > 0 && sync) {
		ssize_t err;

		err = sync_page_range_nolock(inode, file->f_mapping, pos, count);
		if (err < 0)
			written = err;
	}

	mutex_unlock(&inode->i_mutex);

	mlog_exit(ret);
	return written ? written : ret;
}
Example #6
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	block_t old_blk_addr;
	struct dnode_of_data dn;
	int err, ilock;

	f2fs_balance_fs(sbi);

	/* F2FS backport: on old kernels we replace sb_start_pagefault(inode->i_sb) with vfs_check_frozen()
	 * and remove the original sb_end_pagefault(inode->i_sb) after the out label.
	 *
	 * The introduction of sb_{start,end}_pagefault() was made post-3.2 kernels by Jan Kara
	 * and merged in commit a0e881b7c189fa2bd76c024dbff91e79511c971d.
	 * Discussed at https://lkml.org/lkml/2012/3/5/278
	 *
	 * - Alex
	 */
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* block allocation */
	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
	if (err) {
		mutex_unlock_op(sbi, ilock);
		goto out;
	}

	old_blk_addr = dn.data_blkaddr;

	if (old_blk_addr == NULL_ADDR) {
		err = reserve_new_block(&dn);
		if (err) {
			f2fs_put_dnode(&dn);
			mutex_unlock_op(sbi, ilock);
			goto out;
		}
	}
	f2fs_put_dnode(&dn);
	mutex_unlock_op(sbi, ilock);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page)) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

mapped:
	/* fill the page */
	wait_on_page_writeback(page);
out:
	return block_page_mkwrite_return(err);
}
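
For comparison with the backport comment in Example #6, here is a rough sketch of the upstream (post commit a0e881b7c189) shape of the same handler, where sb_start_pagefault()/sb_end_pagefault() bracket the fault instead of vfs_check_frozen(); this is only an outline, not the full f2fs function:

/* Sketch of the upstream pattern the backport comment refers to. */
static int f2fs_vm_page_mkwrite_upstream(struct vm_area_struct *vma,
					struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int err = 0;

	sb_start_pagefault(inode->i_sb);	/* replaces vfs_check_frozen() */

	/* ... block allocation and page checks as in Example #6 ... */

	sb_end_pagefault(inode->i_sb);		/* dropped in the backport */
	return block_page_mkwrite_return(err);
}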