Example #1
File: file.c Project: sjtutravel/hmfs
static int hmfs_release_file(struct inode *inode, struct file *filp)
{
//      TODO: Separate atomic and volatile or not
//      If we do separate: /* some remaining atomic pages should be discarded */
//      Else:
//      set_inode_flag(HMFS_I(inode), FI_DROP_CACHE);
	filemap_fdatawrite(inode->i_mapping);
//      clear_inode_flag(HMFS_I(inode), FI_DROP_CACHE);
	return 0;
}
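Example #4 below shows how f2fs implements the separated handling this TODO sketches. A hypothetical hmfs version along the same lines might look like the following; hmfs_is_atomic_file(), hmfs_is_volatile_file(), and hmfs_commit_inmem_pages() are assumed names for illustration, not actual hmfs API:

static int hmfs_release_file(struct inode *inode, struct file *filp)
{
	/* hypothetical: discard any remaining atomic pages */
	if (hmfs_is_atomic_file(inode))
		hmfs_commit_inmem_pages(inode, true);
	/* hypothetical: drop cached pages of volatile files on release */
	if (hmfs_is_volatile_file(inode)) {
		set_inode_flag(HMFS_I(inode), FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(HMFS_I(inode), FI_DROP_CACHE);
	}
	return 0;
}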
Example #2
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                   unsigned long nr_segs, loff_t pos)
{
    struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
    ssize_t written;

    written = generic_file_aio_write(iocb, iov, nr_segs, pos);
    if (!CIFS_I(inode)->clientCanCacheAll)
        filemap_fdatawrite(inode->i_mapping);
    return written;
}
Example #3
void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gl->gl_aspace->i_mapping;
	int error;

	filemap_fdatawrite(mapping);
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(gl->gl_sbd);
}
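The fdatawrite/fdatawait pair here is exactly what filemap_write_and_wait() bundles (Example #6 notes that helper has existed since roughly 2.6.18). A sketch of the same function using it; it is slightly stricter, since it also reports errors from the write phase:

void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gl->gl_aspace->i_mapping;
	int error = filemap_write_and_wait(mapping);

	if (error)
		gfs2_io_error(gl->gl_sbd);
}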
Example #4
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, true);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
	}
	return 0;
}
Example #5
static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
}
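Several of the gfs2 examples repeat the same idiom: start writeback, wait for it, then latch any error on the mapping so a later fsync() or close() can report it. Distilled into a minimal sketch:

/* write/wait/record-error idiom used by the glock sync routines above */
static int sync_mapping_and_record(struct address_space *mapping)
{
	int error;

	filemap_fdatawrite(mapping);		/* kick off async writeback */
	error = filemap_fdatawait(mapping);	/* wait for it to finish */
	mapping_set_error(mapping, error);	/* remember failure for later */
	return error;
}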
Example #6
static int
lofs_fsync(FSYNC_ARGS(struct file *file, 
                struct dentry *dentry, 
                loff_t start,
                loff_t end,
                int datasync))
{
    struct file *lower = lofs_file_to_lower(file);
    int result = 0;

    /* Make sure the LOFS pages are flushed out to the lower filesystem.
     * Different kernels have different utility functions to help with
     * this; in the worst case (2.6.9) we roll our own.
     */

#if defined(HAVE_OFFSET_IN_FSYNC)
    /* This is the 3.2.0+ version. */

    result = filemap_write_and_wait_range(file->f_mapping, start, end);
#elif defined(HAVE_FILEMAP_WRITE_AND_WAIT)
    /* This is the 2.6.18 - 3.2.0 version. */

    result = filemap_write_and_wait(file->f_mapping);
#else
    /* This is for versions prior to 2.6.18.  This is basically a copy of
     * the implementation of filemap_write_and_wait, which unfortunately was
     * not added until 2.6.18 or so.
     */

    if (file->f_mapping->nrpages) {
        result = filemap_fdatawrite(file->f_mapping);
        if (result != -EIO) {
            int result2 = filemap_fdatawait(file->f_mapping);
            if (result == 0) {
                result = result2;
            }
        }
    }
#endif
    if (result != 0) {
        return result;
    }

    /* Then give the lower filesystem a chance to do its own sync. */

    return FSYNC_HELPER(FSYNC_ARGS(lower, 
                    lofs_dentry_to_lower(dentry),
                    start,
                    end,
                    datasync));
}
Example #7
static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl, NORMAL_FLUSH);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}
Example #8
/**
 * v9fs_evict_inode - release an inode
 * @inode: inode to release
 *
 */
void v9fs_evict_inode(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	truncate_inode_pages_final(inode->i_mapping);
	clear_inode(inode);
	filemap_fdatawrite(inode->i_mapping);

	v9fs_cache_inode_put_cookie(inode);
	/* clunk the fid stashed in writeback_fid */
	if (v9inode->writeback_fid) {
		p9_client_clunk(v9inode->writeback_fid);
		v9inode->writeback_fid = NULL;
	}
}
Example #9
static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
}
Example #10
/**
 * vfs_fsync - perform a fsync or fdatasync on a file
 * @file:		file to sync
 * @dentry:		dentry of @file
 * @datasync:		only perform a fdatasync operation
 *
 * Write back data and metadata for @file to disk.  If @datasync is
 * set only metadata needed to access modified file data is written.
 *
 * In case this function is called from nfsd @file may be %NULL and
 * only @dentry is set.  This can only happen when the filesystem
 * implements the export_operations API.
 */
int vfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	const struct file_operations *fop;
	struct address_space *mapping;
	int err, ret;

	/*
	 * Get mapping and operations from the file in case we have
	 * a file, or get the default values for them in case we
	 * don't have a struct file available.  Damn nfsd..
	 */
	if (file) {
#ifdef CONFIG_KRG_FAF
		if (file->f_flags & O_FAF_CLT) {
			ret = krg_faf_fsync(file);
			goto out;
		}
#endif
		mapping = file->f_mapping;
		fop = file->f_op;
	} else {
		mapping = dentry->d_inode->i_mapping;
		fop = dentry->d_inode->i_fop;
	}

	if (!fop || !fop->fsync) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_fdatawrite(mapping);

	/*
	 * We need to protect against concurrent writers, which could cause
	 * livelocks in fsync_buffers_list().
	 */
	mutex_lock(&mapping->host->i_mutex);
	err = fop->fsync(file, dentry, datasync);
	if (!ret)
		ret = err;
	mutex_unlock(&mapping->host->i_mutex);
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
out:
	return ret;
}
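A minimal caller sketch for this three-argument vfs_fsync() (later kernels dropped the dentry argument); passing datasync = 0 requests a full fsync:

static int flush_whole_file(struct file *filp)
{
	/* sketch: full fsync of an open file through this era's interface */
	return vfs_fsync(filp, filp->f_path.dentry, 0);
}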
Example #11
/**
 * v9fs_evict_inode - release an inode
 * @inode: inode to release
 *
 */
void v9fs_evict_inode(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	truncate_inode_pages(inode->i_mapping, 0);
	end_writeback(inode);
	filemap_fdatawrite(inode->i_mapping);

#ifdef CONFIG_9P_FSCACHE
	v9fs_cache_inode_put_cookie(inode);
#endif
	/* clunk the fid stashed in writeback_fid */
	if (v9inode->writeback_fid) {
		p9_client_clunk(v9inode->writeback_fid);
		v9inode->writeback_fid = NULL;
	}
}
Example #12
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t written;
	int rc;

	written = generic_file_aio_write(iocb, iov, nr_segs, pos);

	if (CIFS_I(inode)->clientCanCacheAll)
		return written;

	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cFYI(1, "cifs_file_aio_write: %d rc on %p inode", rc, inode);

	return written;
}
Example #13
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t written;
	int rc;

	written = generic_file_aio_write(iocb, iov, nr_segs, pos);

	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		return written;

	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_aio_write: %d rc on %p inode\n",
			 rc, inode);

	return written;
}
Example #14
/*
 * Flush all dirty pages, and check for write errors.
 */
static int
nfs4_file_flush(struct file *file, fl_owner_t id)
{
	struct inode	*inode = file_inode(file);

	dprintk("NFS: flush(%pD2)\n", file);

	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	/*
	 * If we're holding a write delegation, then check if we're required
	 * to flush the i/o on close. If not, then just start the i/o now.
	 */
	if (!nfs4_delegation_flush_on_close(inode))
		return filemap_fdatawrite(file->f_mapping);

	/* Flush writes to the server and return any errors */
	return vfs_fsync(file, 0);
}
Example #15
static ssize_t
cifs_write_wrapper(struct file * file, const char __user *write_data,
           size_t write_size, loff_t * poffset) 
{
	ssize_t written;

	if(file->f_dentry == NULL)
		return -EIO;
	else if(file->f_dentry->d_inode == NULL)
		return -EIO;

	cFYI(1,("In write_wrapper size %zd at %lld",write_size,*poffset));

	written = generic_file_write(file,write_data,write_size,poffset);
	if(!CIFS_I(file->f_dentry->d_inode)->clientCanCacheAll)  {
		if(file->f_dentry->d_inode->i_mapping) {
			filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
		}
	}
	return written;
}
Example #16
File: file.c Project: LBSHackathon/linux
/*
 * Flush all dirty pages, and check for write errors.
 */
int
nfs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode	*inode = file_inode(file);

	dprintk("NFS: flush(%pD2)\n", file);

	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	/*
	 * If we're holding a write delegation, then just start the i/o
	 * but don't wait for completion (or send a commit).
	 */
	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		return filemap_fdatawrite(file->f_mapping);

	/* Flush writes to the server and return any errors */
	return vfs_fsync(file, 0);
}
Example #17
/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
 * marks the relevant pages dirty.  The application may now run fsync() to
 * write out the dirty pages and wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
static int msync_interval(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int flags)
{
	int ret = 0;
	struct file * file = vma->vm_file;

	if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
		return -EBUSY;

	if (file && (vma->vm_flags & VM_SHARED)) {
		ret = filemap_sync(vma, start, end-start, flags);

		if (!ret && (flags & MS_SYNC)) {
			struct address_space *mapping = file->f_mapping;
			int err;

			ret = filemap_fdatawrite(mapping);
			if (file->f_op && file->f_op->fsync) {
				/*
				 * We don't take i_sem here because mmap_sem
				 * is already held.
				 */
				err = file->f_op->fsync(file,file->f_dentry,1);
				if (err && !ret)
					ret = err;
			}
			err = filemap_fdatawait(mapping);
			
#ifdef CONFIG_MOT_WFN484
			if (test_and_clear_bit(AS_MCTIME, &mapping->flags))
					    inode_update_time(mapping->host, 1); 
#endif
			
			if (!ret)
				ret = err;
		}
	}
	return ret;
}
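From userspace, the semantics described in the comment above map onto the standard msync(2) flags; a minimal sketch:

#include <sys/mman.h>

/* MS_SYNC writes the dirty pages in [addr, addr+len) and waits;
 * MS_ASYNC only marks them dirty, leaving writeout to a later
 * fsync() or fadvise(FADV_DONTNEED), as the comment explains. */
int sync_mapped_region(void *addr, size_t len, int wait)
{
	return msync(addr, len, wait ? MS_SYNC : MS_ASYNC);
}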
Example #18
static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	struct gfs2_rgrpd *rgd;
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
}
Example #19
int
xfs_flush_pages(
	xfs_inode_t	*ip,
	xfs_off_t	first,
	xfs_off_t	last,
	uint64_t	flags,
	int		fiopt)
{
	struct address_space *mapping = VFS_I(ip)->i_mapping;
	int		ret = 0;
	int		ret2;

	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		xfs_iflags_clear(ip, XFS_ITRUNCATED);
		ret = -filemap_fdatawrite(mapping);
	}
	if (flags & XBF_ASYNC)
		return ret;
	ret2 = xfs_wait_on_pages(ip, first, last);
	if (!ret)
		ret = ret2;
	return ret;
}
Example #20
File: cifsfs.c Project: 020gzh/linux
static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = generic_file_write_iter(iocb, from);

	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		goto out;

	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
			 rc, inode);

out:
	cifs_put_writer(cinode);
	return written;
}
Example #21
static ssize_t
cifs_write_wrapper(struct file * file, const char __user *write_data,
           size_t write_size, loff_t * poffset) 
{
	ssize_t written;

	if(file == NULL)
		return -EIO;
	else if(file->f_dentry == NULL)
		return -EIO;
	else if(file->f_dentry->d_inode == NULL)
		return -EIO;

	cFYI(1,("In write_wrapper size %zd at %lld",write_size,*poffset));

#ifdef CONFIG_CIFS_EXPERIMENTAL    /* BB fixme - fix user char * to kernel char * mapping here BB */
	/* check whether we can cache writes locally */
	if(file->f_dentry->d_sb) {
		struct cifs_sb_info *cifs_sb;
		cifs_sb = CIFS_SB(file->f_dentry->d_sb);
		if(cifs_sb != NULL) {
			if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
				return cifs_user_write(file,write_data,
							write_size,poffset);
			}
		}
	}
#endif /* CIFS_EXPERIMENTAL */
	written = generic_file_write(file,write_data,write_size,poffset);
	if(!CIFS_I(file->f_dentry->d_inode)->clientCanCacheAll)  {
		if(file->f_dentry->d_inode->i_mapping) {
			filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
		}
	}
	return written;
}
Example #22
static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;

	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		if (ip)
			filemap_fdatawrite(ip->i_inode.i_mapping);
		gfs2_meta_sync(gl);
		if (ip) {
			struct address_space *mapping = ip->i_inode.i_mapping;
			int error = filemap_fdatawait(mapping);
			if (error == -ENOSPC)
				set_bit(AS_ENOSPC, &mapping->flags);
			else if (error)
				set_bit(AS_EIO, &mapping->flags);
		}
		clear_bit(GLF_DIRTY, &gl->gl_flags);
		gfs2_ail_empty_gl(gl);
	}
}
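The open-coded AS_ENOSPC/AS_EIO bit setting in this older version is what the newer gfs2 examples express as mapping_set_error(). A sketch of roughly what that helper condenses the branch into:

static inline void mapping_set_error_sketch(struct address_space *mapping,
					    int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}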
Example #23
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	struct extent_info ei;
	pgoff_t pg_start, pg_end;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	unsigned int pages_per_sec = sbi->segs_per_sec * blk_per_seg;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (need_inplace_update(inode))
		return -EINVAL;

	pg_start = range->start >> PAGE_CACHE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_CACHE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk) {
			fragmented = true;
			break;
		}
		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	sec_num = (map.m_len + pages_per_sec - 1) / pages_per_sec;

	/*
	 * make sure there are enough free section for LFS allocation, this can
	 * avoid defragment running in SSR mode when free section are allocated
	 * intensively
	 */
	if (has_not_enough_free_secs(sbi, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		set_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_CACHE_SHIFT;
	return err;
}
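This function backs the F2FS_IOC_DEFRAGMENT ioctl. A userspace sketch of driving it follows; the ioctl number and structure mirror f2fs.h of this era (later kernels redefined it as _IOWR), so treat the definitions as assumptions:

#include <sys/ioctl.h>
#include <stdint.h>

struct f2fs_defragment {
	uint64_t start;
	uint64_t len;
};

#define F2FS_IOCTL_MAGIC	0xf5
#define F2FS_IOC_DEFRAGMENT	_IOW(F2FS_IOCTL_MAGIC, 8, struct f2fs_defragment)

int defragment_range(int fd, uint64_t start, uint64_t len)
{
	struct f2fs_defragment range = { .start = start, .len = len };

	/* on success the kernel updates range.len to the defragmented size */
	return ioctl(fd, F2FS_IOC_DEFRAGMENT, &range);
}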
Example #24
/**
 * gfs2_set_flags - set flags on an inode
 * @inode: The inode
 * @flags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write(filp->f_path.mnt);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!is_owner_or_cap(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write(filp->f_path.mnt);
	return error;
}
Example #25
static void v9fs_clear_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
}
Example #26
int
cifs_revalidate(struct dentry *direntry)
{
	int xid;
	int rc = 0;
	char *full_path;
	struct cifs_sb_info *cifs_sb;
	struct cifsInodeInfo *cifsInode;
	loff_t local_size;
	struct timespec local_mtime;
	int invalidate_inode = FALSE;

	if(direntry->d_inode == NULL)
		return -ENOENT;

	cifsInode = CIFS_I(direntry->d_inode);

	if(cifsInode == NULL)
		return -ENOENT;

	/* no sense revalidating inode info on file that no one can write */
	if(CIFS_I(direntry->d_inode)->clientCanCacheRead)
		return rc;

	xid = GetXid();

	cifs_sb = CIFS_SB(direntry->d_sb);

	/* can not safely grab the rename sem here if
	rename calls revalidate since that would deadlock */
	full_path = build_path_from_dentry(direntry);
	if(full_path == NULL) {
		FreeXid(xid);
		return -ENOMEM;
	}
	cFYI(1,
	     ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld jiffies %ld",
	      full_path, direntry->d_inode,
	      direntry->d_inode->i_count.counter, direntry,
	      direntry->d_time, jiffies));

	if (cifsInode->time == 0){
		/* was set to zero previously to force revalidate */
	} else if (time_before(jiffies, cifsInode->time + HZ) && lookupCacheEnabled) {
	    if((S_ISREG(direntry->d_inode->i_mode) == 0) || 
			(direntry->d_inode->i_nlink == 1)) {  
			if (full_path)
				kfree(full_path);
			FreeXid(xid);
			return rc;
		} else {
			cFYI(1,("Have to revalidate file due to hardlinks"));
		}            
	}
	
	/* save mtime and size */
	local_mtime = direntry->d_inode->i_mtime;
	local_size  = direntry->d_inode->i_size;

	if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
		rc = cifs_get_inode_info_unix(&direntry->d_inode, full_path,
					 direntry->d_sb,xid);
		if(rc) {
			cFYI(1,("error on getting revalidate info %d",rc));
/*			if(rc != -ENOENT)
				rc = 0; */ /* BB should we cache info on certain errors? */
		}
	} else {
		rc = cifs_get_inode_info(&direntry->d_inode, full_path, NULL,
				    direntry->d_sb,xid);
		if(rc) {
			cFYI(1,("error on getting revalidate info %d",rc));
/*			if(rc != -ENOENT)
				rc = 0; */  /* BB should we cache info on certain errors? */
		}
	}
	/* should we remap certain errors, access denied?, to zero */

	/* if not oplocked, we invalidate inode pages if mtime 
	   or file size had changed on server */

	if(timespec_equal(&local_mtime,&direntry->d_inode->i_mtime) && 
		(local_size == direntry->d_inode->i_size)) {
		cFYI(1,("cifs_revalidate - inode unchanged"));
	} else {
		/* file may have changed on server */
		if(cifsInode->clientCanCacheRead) {
			/* no need to invalidate inode pages since we were
			   the only ones who could have modified the file and
			   the server copy is staler than ours */
		} else {
			invalidate_inode = TRUE;
		}
	}

	/* can not grab this sem since kernel filesys locking
		documentation indicates i_sem may be taken by the kernel 
		on lookup and rename which could deadlock if we grab
		the i_sem here as well */
/*	down(&direntry->d_inode->i_sem);*/
	/* need to write out dirty pages here  */
	if(direntry->d_inode->i_mapping) {
		/* do we need to lock inode until after invalidate completes below? */
		filemap_fdatawrite(direntry->d_inode->i_mapping);
	}
	if(invalidate_inode) {
		filemap_fdatawait(direntry->d_inode->i_mapping);
		/* may eventually have to do this for open files too */
		if(list_empty(&(cifsInode->openFileList))) {
			/* Has changed on server - flush read ahead pages */
			cFYI(1,("Invalidating read ahead data on closed file"));
			invalidate_remote_inode(direntry->d_inode);
		}
	}
/*	up(&direntry->d_inode->i_sem);*/
	
	if (full_path)
		kfree(full_path);
	FreeXid(xid);

	return rc;
}
Example #27
File: sync.c Project: AiWinters/linux
static void fdatawrite_one_bdev(struct block_device *bdev, void *arg)
{
	filemap_fdatawrite(bdev->bd_inode->i_mapping);
}
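This helper is the per-device callback that sys_sync() runs over every block device. A sketch of the surrounding usage, assuming the iterate_bdevs() helper and the matching wait callback from fs/sync.c of this era:

static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
{
	filemap_fdatawait(bdev->bd_inode->i_mapping);
}

static void sync_all_bdevs(void)
{
	iterate_bdevs(fdatawrite_one_bdev, NULL);	/* start writeback */
	iterate_bdevs(fdatawait_one_bdev, NULL);	/* wait for completion */
}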
Example #28
static int cifs_oplock_thread(void *dummyarg)
{
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	__u16  netfid;
	int rc, waitrc = 0;

	set_freezable();
	do {
		if (try_to_freeze())
			continue;

		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						struct oplock_q_entry, qhead);
			cFYI(1, ("found oplock item to write out"));
			pTcon = oplock_item->tcon;
			inode = oplock_item->pinode;
			netfid = oplock_item->netfid;
			spin_unlock(&GlobalMid_Lock);
			DeleteOplockQEntry(oplock_item);
			/* can not grab inode sem here since it would
				deadlock when oplock received on delete
				since vfs_unlink holds the i_mutex across
				the call */
			/* mutex_lock(&inode->i_mutex);*/
			if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_CIFS_EXPERIMENTAL
				if (CIFS_I(inode)->clientCanCacheAll == 0)
					break_lease(inode, FMODE_READ);
				else if (CIFS_I(inode)->clientCanCacheRead == 0)
					break_lease(inode, FMODE_WRITE);
#endif
				rc = filemap_fdatawrite(inode->i_mapping);
				if (CIFS_I(inode)->clientCanCacheRead == 0) {
					waitrc = filemap_fdatawait(
							      inode->i_mapping);
					invalidate_remote_inode(inode);
				}
				if (rc == 0)
					rc = waitrc;
			} else
				rc = 0;
			/* mutex_unlock(&inode->i_mutex);*/
			if (rc)
				CIFS_I(inode)->write_behind_rc = rc;
			cFYI(1, ("Oplock flush inode %p rc %d",
				inode, rc));

				/* releasing stale oplock after recent reconnect
				of smb session using a now incorrect file
				handle is not a data integrity issue but do
				not bother sending an oplock release if session
				to server still is disconnected since oplock
				already released by the server in that case */
			if (!pTcon->need_reconnect) {
				rc = CIFSSMBLock(0, pTcon, netfid,
						0 /* len */ , 0 /* offset */, 0,
						0, LOCKING_ANDX_OPLOCK_RELEASE,
						false /* wait flag */);
				cFYI(1, ("Oplock release rc = %d", rc));
			}
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);  /* yield in case q were corrupt */
		}
	} while (!kthread_should_stop());

	return 0;
}
Example #29
static int cifs_oplock_thread(void * dummyarg)
{
	struct oplock_q_entry * oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode * inode;
	__u16  netfid;
	int rc;

	daemonize("cifsoplockd");
	allow_signal(SIGTERM);

	oplockThread = current;
	do {
		set_current_state(TASK_INTERRUPTIBLE);
		
		schedule_timeout(1*HZ);  
		spin_lock(&GlobalMid_Lock);
		if(list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next, 
				struct oplock_q_entry, qhead);
			if(oplock_item) {
				cFYI(1,("found oplock item to write out")); 
				pTcon = oplock_item->tcon;
				inode = oplock_item->pinode;
				netfid = oplock_item->netfid;
				spin_unlock(&GlobalMid_Lock);
				DeleteOplockQEntry(oplock_item);
				/* can not grab inode sem here since it would
				deadlock when oplock received on delete 
				since vfs_unlink holds the i_sem across
				the call */
				/* down(&inode->i_sem);*/
				if (S_ISREG(inode->i_mode)) {
					rc = filemap_fdatawrite(inode->i_mapping);
					if(CIFS_I(inode)->clientCanCacheRead == 0) {
						filemap_fdatawait(inode->i_mapping);
						invalidate_remote_inode(inode);
					}
				} else
					rc = 0;
				/* up(&inode->i_sem);*/
				if (rc)
					CIFS_I(inode)->write_behind_rc = rc;
				cFYI(1,("Oplock flush inode %p rc %d",inode,rc));

				/* releasing a stale oplock after recent reconnection 
				of smb session using a now incorrect file 
				handle is not a data integrity issue but do  
				not bother sending an oplock release if session 
				to server still is disconnected since oplock 
				already released by the server in that case */
				if(pTcon->tidStatus != CifsNeedReconnect) {
				    rc = CIFSSMBLock(0, pTcon, netfid,
					    0 /* len */ , 0 /* offset */, 0, 
					    0, LOCKING_ANDX_OPLOCK_RELEASE,
					    0 /* wait flag */);
					cFYI(1,("Oplock release rc = %d ",rc));
				}
			} else
				spin_unlock(&GlobalMid_Lock);
		}
	} while(!signal_pending(current));
	complete_and_exit (&cifs_oplock_exited, 0);
	oplockThread = NULL;
}
Example #30
File: file.c Project: Mr-Aloof/wl500g
static int ocfs2_sync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	return sync_mapping_buffers(inode->i_mapping);
}