/**
 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes
 * @pipe:	pipe info
 * @out:	file to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file. The caller is responsible
 * for acquiring i_mutex on both inodes.
 *
 */
ssize_t generic_file_splice_write_nolock(struct pipe_inode_info *pipe,
					 struct file *out, loff_t *ppos,
					 size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	int err;

	err = remove_suid(out->f_path.dentry);
	if (unlikely(err))
		return err;

	ret = __splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
	if (ret > 0) {
		unsigned long nr_pages;

		*ppos += ret;
		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);

			if (err)
				ret = err;
		}
		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
	}

	return ret;
}
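/*
 * Illustrative sketch only, not taken from the source above: the _nolock
 * variant leaves locking to the caller, so a filesystem that manages its own
 * serialization would wrap it roughly like this. The myfs_* name is a made-up
 * placeholder, and a real caller must also honour the kerneldoc note about
 * holding the pipe inode's mutex as well.
 */
static ssize_t myfs_splice_write(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	struct inode *inode = out->f_mapping->host;
	ssize_t ret;

	/* Caller-side locking that the _nolock variant expects. */
	mutex_lock(&inode->i_mutex);
	ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags);
	mutex_unlock(&inode->i_mutex);

	return ret;
}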
static ssize_t svfs_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	struct address_space *mapping;
	struct inode *inode;
	struct file *llfs_filp;
	struct svfs_inode *si;
	struct splice_desc sd = {0,};
	ssize_t ret;

	svfs_entry(mdc, "pos %lu, len %ld, flags 0x%x\n",
		   (unsigned long)*ppos, (long)len, flags);

	si = SVFS_I(out->f_dentry->d_inode);
	if (si->state & SVFS_STATE_DA) {
		/* create it now */
		ASSERT(!(si->state & SVFS_STATE_CONN));
		ret = llfs_create(out->f_dentry);
		if (ret)
			goto out;
	}
	if (!(si->state & SVFS_STATE_CONN)) {
		/* open it ? */
		ret = llfs_lookup(out->f_dentry->d_inode);
		if (ret)
			goto out;
	}
	llfs_filp = si->llfs_md.llfs_filp;
	ASSERT(llfs_filp);

	mapping = llfs_filp->f_mapping;
	inode = mapping->host;
	sd.total_len = len;
	sd.flags = flags;
	sd.pos = *ppos;
	sd.u.file = llfs_filp;

	pipe_lock(pipe);

	splice_from_pipe_begin(&sd);
	do {
		ret = splice_from_pipe_next(pipe, &sd);
		if (ret <= 0)
			break;

		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
		ret = file_remove_suid(out);
		if (!ret)
			ret = splice_from_pipe_feed(pipe, &sd, pipe_to_file);
		mutex_unlock(&inode->i_mutex);
	} while (ret > 0);
	splice_from_pipe_end(pipe, &sd);

	pipe_unlock(pipe);

	if (sd.num_spliced)
		ret = sd.num_spliced;

	if (ret > 0) {
		unsigned long nr_pages;

		*ppos += ret;
		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			int err;

			mutex_lock(&inode->i_mutex);
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);
			mutex_unlock(&inode->i_mutex);

			if (err)
				ret = err;
		}
		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
	}

out:
	return ret;
}
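/*
 * Assumed wiring, not taken from the source: a ->splice_write handler such as
 * svfs_file_splice_write() is only reached if the filesystem registers it in
 * its file_operations, so that splice(2) with the file as the output fd can
 * dispatch to it. The svfs_file_operations name and the omitted members are
 * placeholders for this sketch.
 */
static const struct file_operations svfs_file_operations = {
	.splice_write	= svfs_file_splice_write,
	/* other methods (read, write, mmap, ...) omitted in this sketch */
};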
/*
 * Almost copy of generic_splice_write() (added changed_begin/end,
 * tux3_iattrdirty()).
 */
static ssize_t tux3_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct sb *sb = tux_sb(inode->i_sb);
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};
	ssize_t ret;

	sb_start_write(inode->i_sb);

	pipe_lock(pipe);

	splice_from_pipe_begin(&sd);
	do {
		ret = splice_from_pipe_next(pipe, &sd);
		if (ret <= 0)
			break;

		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);

		/* For each ->write_end() calls change_end(). */
		change_begin(sb);
		/* For timestamp. FIXME: convert this to ->update_time
		 * handler? */
		tux3_iattrdirty(inode);
		ret = file_remove_suid(out);
		if (!ret) {
			ret = file_update_time(out);
			if (!ret)
				ret = splice_from_pipe_feed(pipe, &sd,
							    pipe_to_file);
		}
		change_end_if_needed(sb);

		mutex_unlock(&inode->i_mutex);
	} while (ret > 0);
	splice_from_pipe_end(pipe, &sd);

	pipe_unlock(pipe);

	if (sd.num_spliced)
		ret = sd.num_spliced;

	if (ret > 0) {
		unsigned long nr_pages;
		int err;

		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		err = generic_write_sync(out, *ppos, ret);
		if (err)
			ret = err;
		else
			*ppos += ret;
		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
	}
	sb_end_write(inode->i_sb);

	return ret;
}
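/*
 * User-space sketch (illustrative, not part of the kernel code above): the
 * ->splice_write handlers in this collection are what splice(2) ends up
 * calling when data is moved from a pipe into a regular file. Only the public
 * splice(2) API is used here; the output file name is arbitrary.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	int pfd[2];
	int out = open("out.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (out < 0 || pipe(pfd) < 0) {
		perror("setup");
		return 1;
	}

	/* Fill the pipe, then splice its contents straight into the file. */
	if (write(pfd[1], "hello splice\n", 13) != 13 ||
	    splice(pfd[0], NULL, out, NULL, 13, SPLICE_F_MOVE) < 0)
		perror("splice");

	close(pfd[0]);
	close(pfd[1]);
	close(out);
	return 0;
}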
asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
	unsigned long end;
	struct vm_area_struct *vma;
	int unmapped_error = 0;
	int error = -EINVAL;
	int done = 0;

	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if (start & ~PAGE_MASK)
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = -ENOMEM;
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		goto out;
	error = 0;
	if (end == start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end.
	 */
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, start);
	if (!vma) {
		error = -ENOMEM;
		goto out_unlock;
	}
	do {
		unsigned long nr_pages_dirtied = 0;
		struct file *file;

		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if (end <= vma->vm_end) {
			if (start < end) {
				error = msync_interval(vma, start, end, flags,
							&nr_pages_dirtied);
				if (error)
					goto out_unlock;
			}
			error = unmapped_error;
			done = 1;
		} else {
			/* Here vma->vm_start <= start < vma->vm_end < end. */
			error = msync_interval(vma, start, vma->vm_end, flags,
						&nr_pages_dirtied);
			if (error)
				goto out_unlock;
		}
		file = vma->vm_file;
		start = vma->vm_end;
		if ((flags & MS_ASYNC) && file && nr_pages_dirtied) {
			get_file(file);
			up_read(&current->mm->mmap_sem);
			balance_dirty_pages_ratelimited_nr(file->f_mapping,
							   nr_pages_dirtied);
			fput(file);
			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm, start);
		} else if ((flags & MS_SYNC) && file &&
			   (vma->vm_flags & VM_SHARED)) {
			get_file(file);
			up_read(&current->mm->mmap_sem);
			error = do_fsync(file, 0);
			fput(file);
			down_read(&current->mm->mmap_sem);
			if (error)
				goto out_unlock;
			vma = find_vma(current->mm, start);
		} else {
			vma = vma->vm_next;
		}
	} while (vma && !done);
out_unlock:
	up_read(&current->mm->mmap_sem);
out:
	return error;
}
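/*
 * User-space sketch of the call path into sys_msync() above: map a file
 * MAP_SHARED, dirty it through the mapping, then flush with MS_SYNC.
 * MS_ASYNC would instead only propagate the dirty state and let the
 * dirty-page balancing shown above throttle the caller. Illustrative only;
 * the file name is arbitrary.
 */
#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("data.bin", O_RDWR | O_CREAT, 0644);
	char *p;

	if (fd < 0 || ftruncate(fd, 4096) < 0) {
		perror("open/ftruncate");
		return 1;
	}

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memcpy(p, "dirty page", 10);		/* dirty the shared mapping */

	if (msync(p, 4096, MS_SYNC) < 0)	/* synchronous writeback */
		perror("msync");

	munmap(p, 4096);
	close(fd);
	return 0;
}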