/*
 * Write out an inode's dirty pages and device buffers, then wait for
 * the page writeback to finish.  The first error seen wins; later
 * errors are only kept if no earlier step failed.
 */
static int __block_fsync(struct inode * inode)
{
	int status, rc;

	/* Kick off writeback of dirty pages in the page cache. */
	status = filemap_fdatasync(inode->i_mapping);

	/* Synchronously flush the block device's buffers. */
	rc = sync_buffers(inode->i_rdev, 1);
	if (!status && rc)
		status = rc;

	/* Wait for the page writeback started above to complete. */
	rc = filemap_fdatawait(inode->i_mapping);
	if (!status && rc)
		status = rc;

	return status;
}
/*
 * Drop one reference on an open SMB file.  The last closer flushes
 * dirty pages before calling smb_close(), since nothing can be
 * written to the server once the file is closed.
 */
static int smb_file_release(struct inode *inode, struct file * file)
{
	lock_kernel();

	inode->u.smbfs_i.openers--;
	if (inode->u.smbfs_i.openers == 0) {
		/*
		 * We must flush any dirty pages now as we won't be able
		 * to write anything after close.  mmap can trigger this.
		 * "openers" should perhaps include mmap'ers ...
		 */
		filemap_fdatasync(inode->i_mapping);
		filemap_fdatawait(inode->i_mapping);
		smb_close(inode);
	}

	unlock_kernel();
	return 0;
}
/*
 * vnode pcache layer for vnode_flushinval_pages.
 * 'last' parameter unused but left in for IRIX compatibility
 */
void
fs_flushinval_pages(
	bhv_desc_t	*bdp,
	xfs_off_t	first,
	xfs_off_t	last,
	int		fiopt)
{
	vnode_t		*vp = BHV_TO_VNODE(bdp);
	struct inode	*ip = LINVFS_GET_IP(vp);

	/* Nothing cached means nothing to flush or invalidate. */
	if (!VN_CACHED(vp))
		return;

	/* Push dirty pages and data buffers out, wait for completion,
	 * then toss the cached pages from 'first' onward. */
	filemap_fdatasync(ip->i_mapping);
	fsync_inode_data_buffers(ip);
	filemap_fdatawait(ip->i_mapping);
	truncate_inode_pages(ip->i_mapping, first);
}
/*
 * vnode pcache layer for vnode_flush_pages.
 * 'last' parameter unused but left in for IRIX compatibility
 */
int
fs_flush_pages(
	bhv_desc_t	*bdp,
	xfs_off_t	first,
	xfs_off_t	last,
	uint64_t	flags,
	int		fiopt)
{
	vnode_t		*vp = BHV_TO_VNODE(bdp);
	struct inode	*ip = LINVFS_GET_IP(vp);

	/* Nothing cached means nothing to write back. */
	if (!VN_CACHED(vp))
		return 0;

	/* Start page writeback, flush data buffers, wait for it all. */
	filemap_fdatasync(ip->i_mapping);
	fsync_inode_data_buffers(ip);
	filemap_fdatawait(ip->i_mapping);

	return 0;
}
/* * Lock a (portion of) a file */ int nfs_lock(struct file *filp, int cmd, struct file_lock *fl) { struct inode * inode = filp->f_dentry->d_inode; int status = 0; int status2; dprintk("NFS: nfs_lock(f=%4x/%ld, t=%x, fl=%x, r=%Ld:%Ld)\n", inode->i_dev, inode->i_ino, fl->fl_type, fl->fl_flags, (long long)fl->fl_start, (long long)fl->fl_end); if (!inode) return -EINVAL; /* No mandatory locks over NFS */ if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) return -ENOLCK; /* Fake OK code if mounted without NLM support */ if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM) { if (IS_GETLK(cmd)) status = LOCK_USE_CLNT; goto out_ok; } /* * No BSD flocks over NFS allowed. * Note: we could try to fake a POSIX lock request here by * using ((u32) filp | 0x80000000) or some such as the pid. * Not sure whether that would be unique, though, or whether * that would break in other places. */ if (!fl->fl_owner || (fl->fl_flags & (FL_POSIX|FL_BROKEN)) != FL_POSIX) return -ENOLCK; /* * Flush all pending writes before doing anything * with locks.. */ status = filemap_fdatasync(inode->i_mapping); down(&inode->i_sem); status2 = nfs_wb_all(inode); if (status2 && !status) status = status2; up(&inode->i_sem); status2 = filemap_fdatawait(inode->i_mapping); if (status2 && !status) status = status2; if (status < 0) return status; lock_kernel(); status = nlmclnt_proc(inode, cmd, fl); unlock_kernel(); if (status < 0) return status; status = 0; /* * Make sure we clear the cache whenever we try to get the lock. * This makes locking act as a cache coherency point. */ out_ok: if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { filemap_fdatasync(inode->i_mapping); down(&inode->i_sem); nfs_wb_all(inode); /* we may have slept */ up(&inode->i_sem); filemap_fdatawait(inode->i_mapping); nfs_zap_caches(inode); } return status; }