Example #1
0
static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
{
	/*
	 * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (origin != SEEK_SET && origin != SEEK_CUR) {
		int rc;
		struct inode *inode = file->f_path.dentry->d_inode;

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_I(inode)->clientCanCacheRead && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	return generic_file_llseek(file, offset, origin);
}
Example #2
0
static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (gl->gl_state != LM_ST_UNLOCKED)
		gfs2_pte_inval(gl);
	if (gl->gl_state != LM_ST_EXCLUSIVE)
		return;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;

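	/*
	 * Ordering note: ordinary data is written back before the log
	 * flush, journaled data only after it.
	 */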
	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		if (ip && !gfs2_is_jdata(ip))
			filemap_fdatawrite(ip->i_inode.i_mapping);
		gfs2_log_flush(gl->gl_sbd, gl);
		if (ip && gfs2_is_jdata(ip))
			filemap_fdatawrite(ip->i_inode.i_mapping);
		gfs2_meta_sync(gl);
		if (ip) {
			struct address_space *mapping = ip->i_inode.i_mapping;
			int error = filemap_fdatawait(mapping);
			if (error == -ENOSPC)
				set_bit(AS_ENOSPC, &mapping->flags);
			else if (error)
				set_bit(AS_EIO, &mapping->flags);
		}
		clear_bit(GLF_DIRTY, &gl->gl_flags);
		gfs2_ail_empty_gl(gl);
	}
}
Example #3
0
/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
 * marks the relevant pages dirty.  The application may now run fsync() to
 * write out the dirty pages and wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
static int msync_interval(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int flags)
{
	int ret = 0;
	struct file * file = vma->vm_file;

	if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
		return -EBUSY;

	if (file && (vma->vm_flags & VM_SHARED)) {
		ret = filemap_sync(vma, start, end-start, flags);

		if (!ret && (flags & MS_SYNC)) {
			struct inode *inode = file->f_dentry->d_inode;
			int err;

			down(&inode->i_sem);
			ret = filemap_fdatawrite(inode->i_mapping);
			if (file->f_op && file->f_op->fsync) {
				err = file->f_op->fsync(file,file->f_dentry,1);
				if (err && !ret)
					ret = err;
			}
			err = filemap_fdatawait(inode->i_mapping);
			if (!ret)
				ret = err;
			up(&inode->i_sem);
		}
	}
	return ret;
}
Example #4
0
static int do_unlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	sigset_t oldset;
	int status;

	rpc_clnt_sigmask(NFS_CLIENT(inode), &oldset);
	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	filemap_fdatawrite(filp->f_mapping);
	down(&inode->i_sem);
	nfs_wb_all(inode);
	up(&inode->i_sem);
	filemap_fdatawait(filp->f_mapping);

	/* NOTE: special case
	 * 	If we're signalled while cleaning up locks on process exit, we
	 * 	still need to complete the unlock.
	 */
	lock_kernel();
	/* Use local locking if mounted with "-onolock" */
	if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	else
		status = do_vfs_lock(filp, fl);
	unlock_kernel();
	rpc_clnt_sigunmask(NFS_CLIENT(inode), &oldset);
	return status;
}
Example #5
0
/**
 * Close a regular file.
 *
 * @param inode         the inode
 * @param file          the file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_reg_release(struct inode *inode, struct file *file)
{
    int rc;
    struct sf_reg_info *sf_r;
    struct sf_glob_info *sf_g;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);

    TRACE();
    sf_g = GET_GLOB_INFO(inode->i_sb);
    sf_r = file->private_data;

    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
    /* See the smbfs source (file.c). mmap in particular can cause data to be
     * written to the file after it is closed, which we can't cope with.  We
     * copy and paste the body of filemap_write_and_wait() here as it was not
     * defined before 2.6.6 and not exported until quite a bit later. */
    /* filemap_write_and_wait(inode->i_mapping); */
    if (   inode->i_mapping->nrpages
        && filemap_fdatawrite(inode->i_mapping) != -EIO)
        filemap_fdatawait(inode->i_mapping);
#endif
    rc = vboxCallClose(&client_handle, &sf_g->map, sf_r->handle);
    if (RT_FAILURE(rc))
        LogFunc(("vboxCallClose failed rc=%Rrc\n", rc));

    kfree(sf_r);
    sf_i->file = NULL;
    sf_i->handle = SHFL_HANDLE_NIL;
    file->private_data = NULL;
    return 0;
}
Example #6
0
/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
 * marks the relevant pages dirty.  The application may now run fsync() to
 * write out the dirty pages and wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
static int msync_interval(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int flags)
{
	int ret = 0;
	struct file * file = vma->vm_file;

	if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
		return -EBUSY;

	if (file && (vma->vm_flags & VM_SHARED)) {
		ret = filemap_sync(vma, start, end-start, flags);

		if (!ret && (flags & MS_SYNC)) {
			struct address_space *mapping = file->f_mapping;
			int err;

			ret = filemap_fdatawrite(mapping);
			if (file->f_op && file->f_op->fsync) {
				/*
				 * We don't take i_sem here because mmap_sem
				 * is already held.
				 */
				err = file->f_op->fsync(file,file->f_dentry,1);
				if (err && !ret)
					ret = err;
			}
			err = filemap_fdatawait(mapping);
			if (!ret)
				ret = err;
		}
	}
	return ret;
}
Example #7
0
long do_fsync(struct file *file, int datasync)
{
	int ret;
	int err;
	struct address_space *mapping = file->f_mapping;

	if (!file->f_op || !file->f_op->fsync) {
		/* Why?  We can still call filemap_fdatawrite */
		ret = -EINVAL;
		goto out;
	}

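	/*
	 * Start writeback without waiting; we wait below, after the
	 * filesystem's ->fsync has run.
	 */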
	ret = filemap_fdatawrite(mapping);

	/*
	 * We need to protect against concurrent writers, which could cause
	 * livelocks in fsync_buffers_list().
	 */
	mutex_lock(&mapping->host->i_mutex);
	err = file->f_op->fsync(file, file->f_path.dentry, datasync);
	if (!ret)
		ret = err;
	mutex_unlock(&mapping->host->i_mutex);
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
out:
	return ret;
}
Example #8
0
long do_fsync(struct file *file, int datasync)
{
	int ret;
	int err;
	struct address_space *mapping = file->f_mapping;
	
	if (live_transaction()){
		/* DEP 5/27/10 - Defer fsync until commit. */
		struct deferred_object_operation *def_op;
		txobj_thread_list_node_t *list_node = workset_has_object(&file->f_mapping->host->xobj);

		if (!list_node) {
			tx_cache_get_file_ro(file);
			tx_cache_get_inode_ro(file->f_mapping->host);
			list_node = workset_has_object(&file->f_mapping->host->xobj); 
		}

		def_op = alloc_deferred_object_operation();
		INIT_LIST_HEAD(&def_op->list);
		def_op->type = DEFERRED_TYPE_FSYNC;
		def_op->u.fsync.datasync = datasync;
		def_op->u.fsync.file = file;

		/* DEP: Pin the file until the sync is executed */
		tx_atomic_inc_not_zero(&file->f_count);

		// XXX: Could probably use something finer grained here.  
		WORKSET_LOCK(current->transaction);
		list_add(&def_op->list, &list_node->deferred_operations);
		WORKSET_UNLOCK(current->transaction);
		return 0;
	}

	if (!file->f_op || !file->f_op->fsync) {
		/* Why?  We can still call filemap_fdatawrite */
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_fdatawrite(mapping);

	/*
	 * We need to protect against concurrent writers, which could cause
	 * livelocks in fsync_buffers_list().
	 */
	if (!committing_transaction())
		mutex_lock(&mapping->host->i_mutex);
	err = file->f_op->fsync(file, file_get_dentry(file), datasync);
	if (!ret)
		ret = err;
	if (!committing_transaction())
		mutex_unlock(&mapping->host->i_mutex);
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
out:
	return ret;
}
Example #9
0
static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	status = filemap_fdatawrite(filp->f_mapping);
	if (status == 0) {
		down(&inode->i_sem);
		status = nfs_wb_all(inode);
		up(&inode->i_sem);
		if (status == 0)
			status = filemap_fdatawait(filp->f_mapping);
	}
	if (status < 0)
		return status;

	lock_kernel();
	status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	/* If we were signalled we still need to ensure that
	 * we clean up any state on the server. We therefore
	 * record the lock call as having succeeded in order to
	 * ensure that locks_remove_posix() cleans it out when
	 * the process exits.
	 */
	if (status == -EINTR || status == -ERESTARTSYS)
		posix_lock_file(filp, fl);
	unlock_kernel();
	if (status < 0)
		return status;
	/*
	 * Make sure we clear the cache whenever we try to get the lock.
	 * This makes locking act as a cache coherency point.
	 */
	filemap_fdatawrite(filp->f_mapping);
	down(&inode->i_sem);
	nfs_wb_all(inode);	/* we may have slept */
	up(&inode->i_sem);
	filemap_fdatawait(filp->f_mapping);
	nfs_zap_caches(inode);
	return 0;
}
Example #10
0
void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gl->gl_aspace->i_mapping;
	int error;

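	/*
	 * Write back dirty metadata pages and wait for completion; a wait
	 * error is reported through gfs2_io_error().
	 */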
	filemap_fdatawrite(mapping);
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(gl->gl_sbd);
}
Example #11
0
int
xfs_wait_on_pages(
	xfs_inode_t	*ip,
	xfs_off_t	first,
	xfs_off_t	last)
{
	struct address_space *mapping = VFS_I(ip)->i_mapping;

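	/*
	 * Only wait if pages are still tagged for writeback; the negation
	 * converts the negative errno to the positive error values XFS
	 * used internally at the time.
	 */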
	if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
		return -filemap_fdatawait(mapping);
	return 0;
}
Example #12
0
static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
}
Example #13
0
static int
lofs_fsync(FSYNC_ARGS(struct file *file, 
                struct dentry *dentry, 
                loff_t start,
                loff_t end,
                int datasync))
{
    struct file *lower = lofs_file_to_lower(file);
    int result = 0;

    /* Make sure the LOFS pages are flushed out to the lower filesystem.
     * Different kernels have different utility functions to help with
     * this; in the worst case (2.6.9) we roll our own.
     */

#if defined(HAVE_OFFSET_IN_FSYNC)
    /* This is the 3.2.0+ version. */

    result = filemap_write_and_wait_range(file->f_mapping, start, end);
#elif defined(HAVE_FILEMAP_WRITE_AND_WAIT)
    /* This is the 2.6.18 - 3.2.0 version. */

    result = filemap_write_and_wait(file->f_mapping);
#else
    /* This is for versions prior to 2.6.18.  This is basically a copy of
     * the implementation of filemap_write_and_wait, which unfortunately was
     * not added until 2.6.18 or so.
     */

    if (file->f_mapping->nrpages) {
        result = filemap_fdatawrite(file->f_mapping);
        if (result != -EIO) {
            int result2 = filemap_fdatawait(file->f_mapping);
            if (result == 0) {
                result = result2;
            }
        }
    }
#endif
    if (result != 0) {
        return result;
    }

    /* Then give the lower filesystem a chance to do its own sync. */

    return FSYNC_HELPER(FSYNC_ARGS(lower, 
                    lofs_dentry_to_lower(dentry),
                    start,
                    end,
                    datasync));
}
Example #14
0
static int __block_fsync(struct inode * inode)
{
	int ret, err;

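	/*
	 * Write back dirty pages, sync the device's buffers, then wait for
	 * page writeback; the first error encountered wins.
	 */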
	ret = filemap_fdatasync(inode->i_mapping);
	err = sync_buffers(inode->i_rdev, 1);
	if (err && !ret)
		ret = err;
	err = filemap_fdatawait(inode->i_mapping);
	if (err && !ret)
		ret = err;

	return ret;
}
Example #15
0
static int
smb_file_release(struct inode *inode, struct file * file)
{
	lock_kernel();
	if (!--inode->u.smbfs_i.openers) {
		/* We must flush any dirty pages now as we won't be able to
		   write anything after close. mmap can trigger this.
		   "openers" should perhaps include mmap'ers ... */
		filemap_fdatasync(inode->i_mapping);
		filemap_fdatawait(inode->i_mapping);
		smb_close(inode);
	}
	unlock_kernel();
	return 0;
}
Example #16
0
static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl, NORMAL_FLUSH);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}
Example #17
0
static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);

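	/*
	 * Flush the journal before writing back the resource group's
	 * metadata; a writeback error is recorded on the mapping.
	 */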
	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
}
Example #18
0
/**
 * vfs_fsync - perform a fsync or fdatasync on a file
 * @file:		file to sync
 * @dentry:		dentry of @file
 * @datasync:		only perform a fdatasync operation
 *
 * Write back data and metadata for @file to disk.  If @datasync is
 * set only metadata needed to access modified file data is written.
 *
 * In case this function is called from nfsd @file may be %NULL and
 * only @dentry is set.  This can only happen when the filesystem
 * implements the export_operations API.
 */
int vfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	const struct file_operations *fop;
	struct address_space *mapping;
	int err, ret;

	/*
	 * Get mapping and operations from the file in case we have
	 * a file, or get the default values for them in case we
	 * don't have a struct file available.  Damn nfsd..
	 */
	if (file) {
#ifdef CONFIG_KRG_FAF
		if (file->f_flags & O_FAF_CLT) {
			ret = krg_faf_fsync(file);
			goto out;
		}
#endif
		mapping = file->f_mapping;
		fop = file->f_op;
	} else {
		mapping = dentry->d_inode->i_mapping;
		fop = dentry->d_inode->i_fop;
	}

	if (!fop || !fop->fsync) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_fdatawrite(mapping);

	/*
	 * We need to protect against concurrent writers, which could cause
	 * livelocks in fsync_buffers_list().
	 */
	mutex_lock(&mapping->host->i_mutex);
	err = fop->fsync(file, dentry, datasync);
	if (!ret)
		ret = err;
	mutex_unlock(&mapping->host->i_mutex);
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
out:
	return ret;
}
Example #19
0
/*
 * vnode pcache layer for vnode_flushinval_pages.
 * 'last' parameter unused but left in for IRIX compatibility
 */
void
fs_flushinval_pages(
	bhv_desc_t	*bdp,
	xfs_off_t	first,
	xfs_off_t	last,
	int		fiopt)
{
	vnode_t		*vp = BHV_TO_VNODE(bdp);
	struct inode	*ip = LINVFS_GET_IP(vp);

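	/*
	 * Write back dirty pages and data buffers and wait, then toss
	 * cached pages from offset 'first' onward.
	 */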
	if (VN_CACHED(vp)) {
		filemap_fdatasync(ip->i_mapping);
		fsync_inode_data_buffers(ip);
		filemap_fdatawait(ip->i_mapping);

		truncate_inode_pages(ip->i_mapping, first);
	}
}
Example #20
0
/*
 * vnode pcache layer for vnode_flush_pages.
 * 'last' parameter unused but left in for IRIX compatibility
 */
int
fs_flush_pages(
	bhv_desc_t	*bdp,
	xfs_off_t	first,
	xfs_off_t	last,
	uint64_t	flags,
	int		fiopt)
{
	vnode_t		*vp = BHV_TO_VNODE(bdp);
	struct inode	*ip = LINVFS_GET_IP(vp);

	if (VN_CACHED(vp)) {
		filemap_fdatasync(ip->i_mapping);
		fsync_inode_data_buffers(ip);
		filemap_fdatawait(ip->i_mapping);
	}

	return 0;
}
Example #21
0
static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	struct gfs2_rgrpd *rgd;
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
}
Example #22
0
/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).  Instead, it just
 * marks the relevant pages dirty.  The application may now run fsync() to
 * write out the dirty pages and wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
static int msync_interval(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int flags)
{
	int ret = 0;
	struct file * file = vma->vm_file;

	if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED))
		return -EBUSY;

	if (file && (vma->vm_flags & VM_SHARED)) {
		ret = filemap_sync(vma, start, end-start, flags);

		if (!ret && (flags & MS_SYNC)) {
			struct address_space *mapping = file->f_mapping;
			int err;

			ret = filemap_fdatawrite(mapping);
			if (file->f_op && file->f_op->fsync) {
				/*
				 * We don't take i_sem here because mmap_sem
				 * is already held.
				 */
				err = file->f_op->fsync(file,file->f_dentry,1);
				if (err && !ret)
					ret = err;
			}
			err = filemap_fdatawait(mapping);
			
#ifdef CONFIG_MOT_WFN484
			if (test_and_clear_bit(AS_MCTIME, &mapping->flags))
					    inode_update_time(mapping->host, 1); 
#endif
			
			if (!ret)
				ret = err;
		}
	}
	return ret;
}
Example #23
0
int
xfs_flush_pages(
	xfs_inode_t	*ip,
	xfs_off_t	first,
	xfs_off_t	last,
	uint64_t	flags,
	int		fiopt)
{
	struct address_space *mapping = ip->i_vnode->i_mapping;
	int		ret = 0;
	int		ret2;

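	/*
	 * Nothing to do unless some pages are dirty; XFS_B_ASYNC callers
	 * return without waiting for the writeback they started.
	 */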
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		xfs_iflags_clear(ip, XFS_ITRUNCATED);
		ret = filemap_fdatawrite(mapping);
		if (flags & XFS_B_ASYNC)
			return ret;
		ret2 = filemap_fdatawait(mapping);
		if (!ret)
			ret = ret2;
	}
	return ret;
}
Example #24
0
/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write(filp->f_path.mnt);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!is_owner_or_cap(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write(filp->f_path.mnt);
	return error;
}
Example #25
0
static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
{
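	/* Wait for in-flight writeback on this block device's page cache;
	 * the return value is ignored here. */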
	filemap_fdatawait(bdev->bd_inode->i_mapping);
}
Example #26
0
int
cifs_revalidate(struct dentry *direntry)
{
	int xid;
	int rc = 0;
	char *full_path;
	struct cifs_sb_info *cifs_sb;
	struct cifsInodeInfo *cifsInode;
	loff_t local_size;
	struct timespec local_mtime;
	int invalidate_inode = FALSE;

	if(direntry->d_inode == NULL)
		return -ENOENT;

	cifsInode = CIFS_I(direntry->d_inode);

	if(cifsInode == NULL)
		return -ENOENT;

	/* no sense revalidating inode info on file that no one can write */
	if(CIFS_I(direntry->d_inode)->clientCanCacheRead)
		return rc;

	xid = GetXid();

	cifs_sb = CIFS_SB(direntry->d_sb);

	/* can not safely grab the rename sem here if
	rename calls revalidate since that would deadlock */
	full_path = build_path_from_dentry(direntry);
	if(full_path == NULL) {
		FreeXid(xid);
		return -ENOMEM;
	}
	cFYI(1,
	     ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld jiffies %ld",
	      full_path, direntry->d_inode,
	      direntry->d_inode->i_count.counter, direntry,
	      direntry->d_time, jiffies));

	if (cifsInode->time == 0){
		/* was set to zero previously to force revalidate */
	} else if (time_before(jiffies, cifsInode->time + HZ) && lookupCacheEnabled) {
	    if((S_ISREG(direntry->d_inode->i_mode) == 0) || 
			(direntry->d_inode->i_nlink == 1)) {  
			if (full_path)
				kfree(full_path);
			FreeXid(xid);
			return rc;
		} else {
			cFYI(1,("Have to revalidate file due to hardlinks"));
		}            
	}
	
	/* save mtime and size */
	local_mtime = direntry->d_inode->i_mtime;
	local_size  = direntry->d_inode->i_size;

	if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
		rc = cifs_get_inode_info_unix(&direntry->d_inode, full_path,
					 direntry->d_sb,xid);
		if(rc) {
			cFYI(1,("error on getting revalidate info %d",rc));
/*			if(rc != -ENOENT)
				rc = 0; */ /* BB should we cache info on certain errors? */
		}
	} else {
		rc = cifs_get_inode_info(&direntry->d_inode, full_path, NULL,
				    direntry->d_sb,xid);
		if(rc) {
			cFYI(1,("error on getting revalidate info %d",rc));
/*			if(rc != -ENOENT)
				rc = 0; */  /* BB should we cache info on certain errors? */
		}
	}
	/* should we remap certain errors, access denied?, to zero */

	/* if not oplocked, we invalidate inode pages if mtime 
	   or file size had changed on server */

	if(timespec_equal(&local_mtime,&direntry->d_inode->i_mtime) && 
		(local_size == direntry->d_inode->i_size)) {
		cFYI(1,("cifs_revalidate - inode unchanged"));
	} else {
		/* file may have changed on server */
		if(cifsInode->clientCanCacheRead) {
			/* no need to invalidate inode pages since we were
			   the only ones who could have modified the file and
			   the server copy is staler than ours */
		} else {
			invalidate_inode = TRUE;
		}
	}

	/* can not grab this sem since kernel filesys locking
		documentation indicates i_sem may be taken by the kernel 
		on lookup and rename which could deadlock if we grab
		the i_sem here as well */
/*	down(&direntry->d_inode->i_sem);*/
	/* need to write out dirty pages here  */
	if(direntry->d_inode->i_mapping) {
		/* do we need to lock inode until after invalidate completes below? */
		filemap_fdatawrite(direntry->d_inode->i_mapping);
	}
	if(invalidate_inode) {
		filemap_fdatawait(direntry->d_inode->i_mapping);
		/* may eventually have to do this for open files too */
		if(list_empty(&(cifsInode->openFileList))) {
			/* Has changed on server - flush read ahead pages */
			cFYI(1,("Invalidating read ahead data on closed file"));
			invalidate_remote_inode(direntry->d_inode);
		}
	}
/*	up(&direntry->d_inode->i_sem);*/
	
	if (full_path)
		kfree(full_path);
	FreeXid(xid);

	return rc;
}
Example #27
0
static int cifs_oplock_thread(void *dummyarg)
{
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	__u16  netfid;
	int rc, waitrc = 0;

	set_freezable();
	do {
		if (try_to_freeze())
			continue;

		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						struct oplock_q_entry, qhead);
			cFYI(1, ("found oplock item to write out"));
			pTcon = oplock_item->tcon;
			inode = oplock_item->pinode;
			netfid = oplock_item->netfid;
			spin_unlock(&GlobalMid_Lock);
			DeleteOplockQEntry(oplock_item);
			/* can not grab inode sem here since it would
				deadlock when oplock received on delete
				since vfs_unlink holds the i_mutex across
				the call */
			/* mutex_lock(&inode->i_mutex);*/
			if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_CIFS_EXPERIMENTAL
				if (CIFS_I(inode)->clientCanCacheAll == 0)
					break_lease(inode, FMODE_READ);
				else if (CIFS_I(inode)->clientCanCacheRead == 0)
					break_lease(inode, FMODE_WRITE);
#endif
				rc = filemap_fdatawrite(inode->i_mapping);
				if (CIFS_I(inode)->clientCanCacheRead == 0) {
					waitrc = filemap_fdatawait(
							      inode->i_mapping);
					invalidate_remote_inode(inode);
				}
				if (rc == 0)
					rc = waitrc;
			} else
				rc = 0;
			/* mutex_unlock(&inode->i_mutex);*/
			if (rc)
				CIFS_I(inode)->write_behind_rc = rc;
			cFYI(1, ("Oplock flush inode %p rc %d",
				inode, rc));

				/* releasing stale oplock after recent reconnect
				of smb session using a now incorrect file
				handle is not a data integrity issue but do
				not bother sending an oplock release if session
				to server still is disconnected since oplock
				already released by the server in that case */
			if (!pTcon->need_reconnect) {
				rc = CIFSSMBLock(0, pTcon, netfid,
						0 /* len */ , 0 /* offset */, 0,
						0, LOCKING_ANDX_OPLOCK_RELEASE,
						false /* wait flag */);
				cFYI(1, ("Oplock release rc = %d", rc));
			}
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);  /* yield in case q were corrupt */
		}
	} while (!kthread_should_stop());

	return 0;
}
Example #28
0
/*
 * Lock a (portion of) a file
 */
int
nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode * inode = filp->f_dentry->d_inode;
	int	status = 0;
	int	status2;

	dprintk("NFS: nfs_lock(f=%4x/%ld, t=%x, fl=%x, r=%Ld:%Ld)\n",
			inode->i_dev, inode->i_ino,
			fl->fl_type, fl->fl_flags,
			(long long)fl->fl_start, (long long)fl->fl_end);

	if (!inode)
		return -EINVAL;

	/* No mandatory locks over NFS */
	if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
		return -ENOLCK;

	/* Fake OK code if mounted without NLM support */
	if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM) {
		if (IS_GETLK(cmd))
			status = LOCK_USE_CLNT;
		goto out_ok;
	}

	/*
	 * No BSD flocks over NFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!fl->fl_owner || (fl->fl_flags & (FL_POSIX|FL_BROKEN)) != FL_POSIX)
		return -ENOLCK;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	status = filemap_fdatasync(inode->i_mapping);
	down(&inode->i_sem);
	status2 = nfs_wb_all(inode);
	if (status2 && !status)
		status = status2;
	up(&inode->i_sem);
	status2 = filemap_fdatawait(inode->i_mapping);
	if (status2 && !status)
		status = status2;
	if (status < 0)
		return status;

	lock_kernel();
	status = nlmclnt_proc(inode, cmd, fl);
	unlock_kernel();
	if (status < 0)
		return status;
	
	status = 0;

	/*
	 * Make sure we clear the cache whenever we try to get the lock.
	 * This makes locking act as a cache coherency point.
	 */
 out_ok:
	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_fdatasync(inode->i_mapping);
		down(&inode->i_sem);
		nfs_wb_all(inode);      /* we may have slept */
		up(&inode->i_sem);
		filemap_fdatawait(inode->i_mapping);
		nfs_zap_caches(inode);
	}
	return status;
}
Example #29
0
static int cifs_oplock_thread(void * dummyarg)
{
	struct oplock_q_entry * oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode * inode;
	__u16  netfid;
	int rc;

	daemonize("cifsoplockd");
	allow_signal(SIGTERM);

	oplockThread = current;
	do {
		set_current_state(TASK_INTERRUPTIBLE);
		
		schedule_timeout(1*HZ);  
		spin_lock(&GlobalMid_Lock);
		if(list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next, 
				struct oplock_q_entry, qhead);
			if(oplock_item) {
				cFYI(1,("found oplock item to write out")); 
				pTcon = oplock_item->tcon;
				inode = oplock_item->pinode;
				netfid = oplock_item->netfid;
				spin_unlock(&GlobalMid_Lock);
				DeleteOplockQEntry(oplock_item);
				/* can not grab inode sem here since it would
				deadlock when oplock received on delete 
				since vfs_unlink holds the i_sem across
				the call */
				/* down(&inode->i_sem);*/
				if (S_ISREG(inode->i_mode)) {
					rc = filemap_fdatawrite(inode->i_mapping);
					if(CIFS_I(inode)->clientCanCacheRead == 0) {
						filemap_fdatawait(inode->i_mapping);
						invalidate_remote_inode(inode);
					}
				} else
					rc = 0;
				/* up(&inode->i_sem);*/
				if (rc)
					CIFS_I(inode)->write_behind_rc = rc;
				cFYI(1,("Oplock flush inode %p rc %d",inode,rc));

				/* releasing a stale oplock after recent reconnection 
				of smb session using a now incorrect file 
				handle is not a data integrity issue but do  
				not bother sending an oplock release if session 
				to server still is disconnected since oplock 
				already released by the server in that case */
				if(pTcon->tidStatus != CifsNeedReconnect) {
				    rc = CIFSSMBLock(0, pTcon, netfid,
					    0 /* len */ , 0 /* offset */, 0, 
					    0, LOCKING_ANDX_OPLOCK_RELEASE,
					    0 /* wait flag */);
					cFYI(1,("Oplock release rc = %d ",rc));
				}
			} else
				spin_unlock(&GlobalMid_Lock);
		}
	} while(!signal_pending(current));
	complete_and_exit (&cifs_oplock_exited, 0);
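	/* complete_and_exit() does not return, so the assignment below is
	   never reached */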
	oplockThread = NULL;
}