/*
 * Splice data from @pipe into @filp at *@ppos, then flush to the server
 * when the open context or inode requires synchronous writes.
 * Returns the number of bytes written or a negative errno.
 */
ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe, struct file *filp,
			      loff_t *ppos, size_t count, unsigned int flags)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	unsigned long bytes_written = 0;
	ssize_t ret;

	dprintk("NFS splice_write(%s/%s, %lu@%llu)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		(unsigned long) count, (unsigned long long) *ppos);

	/*
	 * The combination of splice and an O_APPEND destination is disallowed.
	 */
	ret = generic_file_splice_write(pipe, filp, ppos, count, flags);
	if (ret > 0)
		bytes_written = ret;

	/* Propagate sync errors, but keep the byte count for statistics. */
	if (ret >= 0 && nfs_need_sync_write(filp, inode)) {
		int err = vfs_fsync(filp, 0);

		if (err < 0)
			ret = err;
	}
	if (ret > 0)
		nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, bytes_written);
	return ret;
}
/*
 * Fsync for Coda: write back this inode's dirty pages, sync the cache
 * container file on the host filesystem, and for a full (non-data) sync
 * also ask Venus to persist the object.
 */
int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync)
{
	struct inode *coda_inode = coda_file->f_path.dentry->d_inode;
	struct coda_file_info *cfi;
	struct file *host_file;
	int err;

	/* Only regular files, directories and symlinks are accepted. */
	if (!S_ISREG(coda_inode->i_mode) && !S_ISDIR(coda_inode->i_mode) &&
	    !S_ISLNK(coda_inode->i_mode))
		return -EINVAL;

	err = filemap_write_and_wait_range(coda_inode->i_mapping, start, end);
	if (err)
		return err;

	mutex_lock(&coda_inode->i_mutex);

	cfi = CODA_FTOC(coda_file);
	BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
	host_file = cfi->cfi_container;

	/* Sync the local container file first ... */
	err = vfs_fsync(host_file, datasync);
	/* ... and, unless this is fdatasync, tell Venus as well. */
	if (!err && !datasync)
		err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode));

	mutex_unlock(&coda_inode->i_mutex);
	return err;
}
static void klog_msg_write(struct klog_msg *msg) { struct file * file = NULL; loff_t pos = 0; int wrote; int size; int error; char *path = NULL; path = klog_full_path(msg->log_name); if (!path) return; file = filp_open(path, O_APPEND|O_WRONLY|O_CREAT, S_IRUSR|S_IWUSR); if (!file) { printk(KERN_ERR "klog : cant open log file"); goto cleanup; } size = strlen(msg->data); wrote = vfs_write(file, msg->data, size, &pos); if (wrote != size) { printk(KERN_ERR "klog : vfs_write result=%d, should be %d", wrote, size); } error = vfs_fsync(file, 0); if (error < 0) printk(KERN_ERR "klog : vfs_fsync err=%d", error); filp_close(file, NULL); cleanup: kfree(path); }
/*
 * eCryptfs fsync (old 3-argument fsync API): forward the request to the
 * lower filesystem's file and dentry unchanged.
 */
static int ecryptfs_fsync(struct file *file, struct dentry *dentry,
			  int datasync)
{
	struct file *lower_file = ecryptfs_file_to_lower(file);
	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);

	return vfs_fsync(lower_file, lower_dentry, datasync);
}
static int do_fsync(unsigned int fd, int datasync) { struct file *file; int ret = -EBADF; ktime_t fsync_t, fsync_diff; char pathname[256], *path; file = fget(fd); if (file) { path = d_path(&(file->f_path), pathname, sizeof(pathname)); if (IS_ERR(path)) path = "(unknown)"; fsync_t = ktime_get(); ret = vfs_fsync(file, datasync); fput(file); fsync_diff = ktime_sub(ktime_get(), fsync_t); if (ktime_to_ms(fsync_diff) >= 5000) { pr_info("VFS: %s pid:%d(%s)(parent:%d/%s) takes %lld ms to fsync %s.\n", __func__, current->pid, current->comm, current->parent->pid, current->parent->comm, ktime_to_ms(fsync_diff), path); } } return ret; }
/*
 * eCryptfs fsync: sync the lower file if one exists; succeed trivially
 * when there is no lower file to sync.
 */
static int ecryptfs_fsync(struct file *file, int datasync)
{
	struct file *lower_file = ecryptfs_file_to_lower(file);

	if (!lower_file)
		return 0;
	return vfs_fsync(lower_file, datasync);
}
/*
 * Release a lock on an NFS file: flush pending writes, wait for
 * in-flight I/O on the lock context, then perform the unlock either
 * against the server or locally depending on @is_local.
 */
static int do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
	struct inode *inode = filp->f_mapping->host;
	struct nfs_lock_context *l_ctx;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	vfs_fsync(filp, 0);

	/* Wait until outstanding I/O under this lock context has drained. */
	l_ctx = nfs_get_lock_context(nfs_file_open_context(filp));
	if (!IS_ERR(l_ctx)) {
		status = nfs_iocounter_wait(&l_ctx->io_count);
		nfs_put_lock_context(l_ctx);
		if (status < 0)
			return status;
	}

	/* NOTE: special case
	 * 	If we're signalled while cleaning up locks on process exit, we
	 * 	still need to complete the unlock.
	 */
	/*
	 * Use local locking if mounted with "-onolock" or with appropriate
	 * "-olocal_lock="
	 */
	if (!is_local)
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	else
		status = do_vfs_lock(filp, fl);
	return status;
}
static int do_fsync(unsigned int fd, int datasync) { struct file *file; int ret = -EBADF; int fput_needed; #ifdef CONFIG_ASYNC_FSYNC struct fsync_work *fwork; #endif if (!fsync_enabled) return 0; file = fget_light(fd, &fput_needed); if (file) { #ifdef CONFIG_ASYNC_FSYNC ktime_t fsync_t, fsync_diff; char pathname[256], *path; path = d_path(&(file->f_path), pathname, sizeof(pathname)); if (IS_ERR(path)) path = "(unknown)"; else if (async_fsync(file, fd)) { if (!fsync_workqueue) fsync_workqueue = create_singlethread_workqueue("fsync"); if (!fsync_workqueue) goto no_async; if (IS_ERR(path)) goto no_async; fwork = kmalloc(sizeof(*fwork), GFP_KERNEL); if (fwork) { strncpy(fwork->pathname, path, sizeof(fwork->pathname) - 1); INIT_WORK(&fwork->work, do_afsync_work); queue_work(fsync_workqueue, &fwork->work); fput_light(file, fput_needed); return 0; } } no_async: fsync_t = ktime_get(); #endif ret = vfs_fsync(file, datasync); fput_light(file, fput_needed); #ifdef CONFIG_ASYNC_FSYNC fsync_diff = ktime_sub(ktime_get(), fsync_t); if (ktime_to_ms(fsync_diff) >= 5000) { pr_info("VFS: %s pid:%d(%s)(parent:%d/%s)\ takes %lld ms to fsync %s.\n", __func__, current->pid, current->comm, current->parent->pid, current->parent->comm, ktime_to_ms(fsync_diff), path); } #endif } return ret; }
/*
 * Sync the file data, don't bother with the metadata.
 * This code was copied from fs/buffer.c:sys_fdatasync().
 */
static int fsg_lun_fsync_sub(struct fsg_lun *curlun)
{
	struct file *filp = curlun->filp;

	/* Nothing to flush for read-only LUNs or LUNs with no backing file. */
	return (curlun->ro || !filp) ? 0 : vfs_fsync(filp, 1);
}
/*
 * Write data to an NFS file.  Direct I/O is handed off to
 * nfs_file_direct_write(); buffered writes go through
 * generic_file_write_iter() and are followed by vfs_fsync() when the
 * open context or inode requires the write to be checked.
 */
ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	unsigned long written = 0;
	ssize_t result;
	size_t count = iov_iter_count(from);

	/* Fail early if the keyring/timeout notification reports an error. */
	result = nfs_key_timeout_notify(file, inode);
	if (result)
		return result;

	/* O_DIRECT writes bypass the page cache entirely. */
	if (iocb->ki_flags & IOCB_DIRECT) {
		result = generic_write_checks(iocb, from);
		if (result <= 0)
			return result;
		return nfs_file_direct_write(iocb, from);
	}

	dprintk("NFS: write(%pD2, %zu@%Ld)\n",
		file, count, (long long) iocb->ki_pos);

	/* Refuse writes to an active swap file. */
	result = -EBUSY;
	if (IS_SWAPFILE(inode))
		goto out_swapfile;
	/*
	 * O_APPEND implies that we must revalidate the file length.
	 */
	if (iocb->ki_flags & IOCB_APPEND) {
		result = nfs_revalidate_file_size(inode, file);
		if (result)
			goto out;
	}

	/* Zero-length writes succeed immediately. */
	result = count;
	if (!count)
		goto out;

	result = generic_file_write_iter(iocb, from);
	if (result > 0)
		written = result;

	/* Return error values */
	if (result >= 0 && nfs_need_check_write(file, inode)) {
		int err = vfs_fsync(file, 0);
		if (err < 0)
			result = err;
	}
	if (result > 0)
		nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
out:
	return result;

out_swapfile:
	printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
	goto out;
}
/*
 * Decide whether this loop device can use direct I/O against its backing
 * file, and if the decision changed, flush dirty pages and flip
 * lo->use_dio / LO_FLAGS_DIRECT_IO with the request queue frozen.
 */
static void __loop_update_dio(struct loop_device *lo, bool dio)
{
	struct file *file = lo->lo_backing_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned short sb_bsize = 0;
	unsigned dio_align = 0;
	bool use_dio;

	/* Alignment mask comes from the backing device's logical block size. */
	if (inode->i_sb->s_bdev) {
		sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
		dio_align = sb_bsize - 1;
	}

	/*
	 * We support direct I/O only if lo_offset is aligned with the
	 * logical I/O size of backing device, and the logical block
	 * size of loop is bigger than the backing device's and the loop
	 * needn't transform transfer.
	 *
	 * TODO: the above condition may be loosed in the future, and
	 * direct I/O may be switched runtime at that time because most
	 * of requests in sane appplications should be PAGE_SIZE algined
	 */
	if (dio) {
		if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
				!(lo->lo_offset & dio_align) &&
				mapping->a_ops->direct_IO &&
				!lo->transfer)
			use_dio = true;
		else
			use_dio = false;
	} else {
		use_dio = false;
	}

	/* No change requested: nothing to do. */
	if (lo->use_dio == use_dio)
		return;

	/* flush dirty pages before changing direct IO */
	vfs_fsync(file, 0);

	/*
	 * The flag of LO_FLAGS_DIRECT_IO is handled similarly with
	 * LO_FLAGS_READ_ONLY, both are set from kernel, and losetup
	 * will get updated by ioctl(LOOP_GET_STATUS)
	 */
	blk_mq_freeze_queue(lo->lo_queue);
	lo->use_dio = use_dio;
	if (use_dio)
		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
	else
		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
	blk_mq_unfreeze_queue(lo->lo_queue);
}
/*
 * Shared implementation of fsync(2)/fdatasync(2): look up the file for
 * @fd and sync it.  Returns -EBADF for an invalid descriptor.
 */
static int do_fsync(unsigned int fd, int datasync)
{
	struct fd f = fdget(fd);
	int ret = -EBADF;

	if (!f.file)
		return ret;

	ret = vfs_fsync(f.file, datasync);
	fdput(f);
	return ret;
}
/*
 * eCryptfs fsync: write back this file's dirty pages, then sync the
 * lower file.
 */
static int ecryptfs_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	int rc = filemap_write_and_wait(file->f_mapping);

	return rc ? rc : vfs_fsync(ecryptfs_file_to_lower(file), datasync);
}
/*
 * eCryptfs fsync: sync this inode via generic_file_fsync(), then
 * propagate the fsync to the lower file.
 */
static int ecryptfs_fsync(struct file *file, int datasync)
{
	int rc;

	rc = generic_file_fsync(file, datasync);
	if (!rc)
		rc = vfs_fsync(ecryptfs_file_to_lower(file), datasync);
	return rc;
}
/*
 * Shared implementation of fsync(2)/fdatasync(2): grab a reference to
 * the file behind @fd, sync it, and drop the reference.
 */
static int do_fsync(unsigned int fd, int datasync)
{
	struct file *file = fget(fd);
	int ret = -EBADF;

	if (!file)
		return ret;

	ret = vfs_fsync(file, datasync);
	fput(file);
	return ret;
}
/*
 * Sync the whole VFS tree: depth-first walk from vfs_root, calling
 * vfs_fsync() on every inode marked dirty.
 *
 * NOTE: uses a GCC nested function (GNU extension).
 */
void sys_sync(void)
{
	/* Recursively flush children first, then this inode if dirty. */
	inline void flush_inode(inode_t *inode)
	{
		struct inode_childs *cx;

		for (cx = inode->childs; cx; cx = cx->next)
			flush_inode(cx->inode);
		if (inode->dirty)
			vfs_fsync(inode);
	}

	flush_inode(vfs_root);
	/*
	 * BUGFIX: the original ended with "});" — a stray ");" after the
	 * function's closing brace, which is a syntax error.
	 */
}
/*
 * tierfs fsync: write back this layer's dirty pages, then sync the
 * lower file.
 */
static int tierfs_fsync(struct file *file, loff_t start, loff_t end,
			int datasync)
{
	int err;

	TRACE_ENTRY();
	err = filemap_write_and_wait(file->f_mapping);
	if (err != 0)
		return err;	/* as before, the early exit skips TRACE_EXIT */
	TRACE_EXIT();
	/* NOTE(review): TRACE_EXIT fires before the lower fsync completes. */
	return vfs_fsync(tierfs_file_to_lower(file), datasync);
}
/*
 * Shared implementation of fsync(2)/fdatasync(2) using the lightweight
 * file lookup.  Returns -EBADF for an invalid descriptor.
 */
static int do_fsync(unsigned int fd, int datasync)
{
	int fput_needed;
	int ret = -EBADF;
	struct file *file = fget_light(fd, &fput_needed);

	if (!file)
		return ret;

	ret = vfs_fsync(file, datasync);
	fput_light(file, fput_needed);
	return ret;
}
/* * process fsync command, let all buffered modification to the specified file be written to the disk. */ static void rfs_fsync(struct aipc_rfs_msg *msg) { struct aipc_rfs_close *param = (struct aipc_rfs_close*)msg->parameter; int ret; ret = vfs_fsync(param->filp, 0); if(ret < 0) { DMSG("rfs_fsync error: %d\n", ret); } msg->parameter[0] = ret; }
/*
 * Shared implementation of fsync(2)/fdatasync(2).  When CONFIG_FSYNC_OFF
 * is set, syncing is disabled and the call succeeds without touching the
 * file.
 */
static int do_fsync(unsigned int fd, int datasync)
{
#ifdef CONFIG_FSYNC_OFF
	/*
	 * BUGFIX: the original called fdget() before this early return and
	 * never called fdput(), leaking a file reference on every fsync
	 * when CONFIG_FSYNC_OFF was enabled.  Return before taking any
	 * reference.
	 */
	return 0;
#else
	struct fd f = fdget(fd);
	int ret = -EBADF;

	if (f.file) {
		ret = vfs_fsync(f.file, datasync);
		fdput(f);
	}
	return ret;
#endif
}
/*
 * Write data to an NFS file (legacy aio/iovec entry point).  O_DIRECT
 * is handed to nfs_file_direct_write(); buffered writes go through
 * generic_file_aio_write() and are followed by vfs_fsync() when the
 * open context or inode requires synchronous writes.
 */
ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
	struct inode * inode = dentry->d_inode;
	unsigned long written = 0;
	ssize_t result;
	size_t count = iov_length(iov, nr_segs);

	/* O_DIRECT writes bypass the page cache entirely. */
	if (iocb->ki_filp->f_flags & O_DIRECT)
		return nfs_file_direct_write(iocb, iov, nr_segs, pos, true);

	dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		(unsigned long) count, (long long) pos);

	/* Refuse writes to an active swap file. */
	result = -EBUSY;
	if (IS_SWAPFILE(inode))
		goto out_swapfile;
	/*
	 * O_APPEND implies that we must revalidate the file length.
	 */
	if (iocb->ki_filp->f_flags & O_APPEND) {
		result = nfs_revalidate_file_size(inode, iocb->ki_filp);
		if (result)
			goto out;
	}

	/* Zero-length writes succeed immediately. */
	result = count;
	if (!count)
		goto out;

	result = generic_file_aio_write(iocb, iov, nr_segs, pos);
	if (result > 0)
		written = result;

	/* Return error values for O_DSYNC and IS_SYNC() */
	if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
		int err = vfs_fsync(iocb->ki_filp, 0);
		if (err < 0)
			result = err;
	}
	if (result > 0)
		nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
out:
	return result;

out_swapfile:
	printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
	goto out;
}
/*
 * Flush all dirty pages, and check for write errors.
 */
static int nfs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	dprintk("NFS: flush(%pD2)\n", file);

	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);

	/* Read-only opens have nothing to flush. */
	if (!(file->f_mode & FMODE_WRITE))
		return 0;

	/* Flush writes to the server and return any errors */
	return vfs_fsync(file, 0);
}
/*
 * Re-open a file by path and fsync it; used by the asynchronous fsync
 * worker.  Returns -EBADF when the path cannot be opened, otherwise the
 * vfs_fsync() result.
 */
static int do_async_fsync(char *pathname)
{
	struct file *file = filp_open(pathname, O_RDWR, 0);
	int ret;

	if (IS_ERR(file)) {
		pr_debug("%s: can't open %s\n", __func__, pathname);
		return -EBADF;
	}

	ret = vfs_fsync(file, 0);
	filp_close(file, NULL);
	return ret;
}
/*
 * Shared implementation of fsync(2)/fdatasync(2) with a runtime kill
 * switch: when fsync_disabled is set the call succeeds without syncing.
 */
static int do_fsync(unsigned int fd, int datasync)
{
	struct file *file;
	int ret = -EBADF;

	if (unlikely(fsync_disabled))
		return 0;

	file = fget(fd);
	if (!file)
		return ret;

	ret = vfs_fsync(file, datasync);
	fput(file);
	return ret;
}
/*
 * sdcardfs fsync: pin the lower path, sync the lower file, unpin.
 */
static int sdcardfskk_fsync(struct file *file, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct file *lower_file = sdcardfskk_lower_file(file);
	struct path lower_path;
	int err;

	sdcardfskk_get_lower_path(dentry, &lower_path);
	err = vfs_fsync(lower_file, datasync);
	sdcardfskk_put_lower_path(dentry, &lower_path);

	return err;
}
/*
 * SCFS fsync: write out this file's metadata, then sync the lower file.
 */
static int scfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int ret = scfs_write_meta(file);

	if (ret)
		return ret;

#ifdef SCFS_MULTI_THREAD_COMPRESSION
//	scfs_write_compress_all_cluster(SCFS_I(file->f_path.dentry->d_inode));
#endif
	return vfs_fsync(scfs_lower_file(file), datasync);
}
/*
 * Flush all dirty pages, and check for write errors.
 */
static int nfs_file_flush(struct file *file, fl_owner_t id)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	dprintk("NFS: flush(%s/%s)\n",
		dentry->d_parent->d_name.name,
		dentry->d_name.name);

	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);

	/* Read-only opens have nothing to flush. */
	if (!(file->f_mode & FMODE_WRITE))
		return 0;

	/* Flush writes to the server and return any errors */
	return vfs_fsync(file, 0);
}
/*
 * Shared implementation of fsync(2)/fdatasync(2).  With CONFIG_FSYNC_OFF
 * the call succeeds immediately without touching the file.
 */
static int do_fsync(unsigned int fd, int datasync)
{
	struct file *file;
	int fput_needed;
	int ret = -EBADF;

	/* conditional fsync disable */
#ifdef CONFIG_FSYNC_OFF
	return 0;
#endif

	file = fget_light(fd, &fput_needed);
	if (!file)
		return ret;

	ret = vfs_fsync(file, datasync);
	fput_light(file, fput_needed);
	return ret;
}
/*
 * Shared implementation of fsync(2)/fdatasync(2) with an optional
 * runtime toggle (CONFIG_FSYNC_CONTROL): when disabled, the call
 * succeeds without syncing.
 */
static int do_fsync(unsigned int fd, int datasync)
{
	struct file *file;
	int ret = -EBADF;

#ifdef CONFIG_FSYNC_CONTROL
	if (!fsynccontrol_fsync_enabled)
		return 0;
#endif

	file = fget(fd);
	if (!file)
		return ret;

	ret = vfs_fsync(file, datasync);
	fput(file);
	return ret;
}
/*
 * aufs wrapper around vfs_fsync(): run the sync with lockdep disabled,
 * and on success refresh the cached attributes for @path (falling back
 * to the file's own path when none is given).
 */
int vfsub_fsync(struct file *file, struct path *path, int datasync)
{
	int err;

	/* file can be NULL */
	lockdep_off();
	err = vfs_fsync(file, datasync);
	lockdep_on();
	if (err)
		return err;

	if (!path) {
		AuDebugOn(!file);
		path = &file->f_path;
	}
	vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/
	return err;
}