/*
 * Common engine for the compat (32-bit userspace) readv/writev paths:
 * copy the compat iovec array in from userspace, verify the I/O area,
 * then dispatch to the best available file operation --
 * ->read_iter/->write_iter, ->aio_read/->aio_write, or a per-segment
 * loop over plain ->read/->write.
 */
static ssize_t compat_do_readv_writev(int type, struct file *file,
			       const struct compat_iovec __user *uvector,
			       unsigned long nr_segs, loff_t *pos)
{
	compat_ssize_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;
	io_fn_t fn;
	iov_fn_t fnv;
	iter_fn_t iter_fn;

	/* <= 0 covers both errors and the zero-length request */
	ret = compat_rw_copy_check_uvector(type, uvector, nr_segs,
					   UIO_FASTIOV, iovstack, &iov);
	if (ret <= 0)
		goto out;

	tot_len = ret;
	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;

	fnv = NULL;
	if (type == READ) {
		fn = file->f_op->read;
		fnv = file->f_op->aio_read;
		iter_fn = file->f_op->read_iter;
	} else {
		fn = (io_fn_t)file->f_op->write;
		fnv = file->f_op->aio_write;
		iter_fn = file->f_op->write_iter;
		/* paired with file_end_write() after the I/O below */
		file_start_write(file);
	}

	if (iter_fn)
		ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len,
					   pos, iter_fn);
	else if (fnv)
		ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
					   pos, fnv);
	else
		ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);

	if (type != READ)
		file_end_write(file);

out:
	if (iov != iovstack)
		kfree(iov);
	/*
	 * Reads notify even on ret == 0 (EOF); writes only when ret > 0 --
	 * hence the (ret + (type == READ)) > 0 trick.
	 */
	if ((ret + (type == READ)) > 0) {
		if (type == READ)
			fsnotify_access(file);
		else
			fsnotify_modify(file);
	}
	return ret;
}
/*
 * copy_file_range() differs from regular file read and write in that it
 * specifically allows returning partial success.  When it does so is up
 * to the copy_file_range method.
 */
ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
			    struct file *file_out, loff_t pos_out,
			    size_t len, unsigned int flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	ssize_t ret;

	/* no flags are defined at this point */
	if (flags != 0)
		return -EINVAL;

	ret = rw_verify_area(READ, file_in, &pos_in, len);
	if (unlikely(ret))
		return ret;

	ret = rw_verify_area(WRITE, file_out, &pos_out, len);
	if (unlikely(ret))
		return ret;

	if (!(file_in->f_mode & FMODE_READ) ||
	    !(file_out->f_mode & FMODE_WRITE) ||
	    (file_out->f_flags & O_APPEND))
		return -EBADF;

	/* this could be relaxed once a method supports cross-fs copies */
	if (inode_in->i_sb != inode_out->i_sb)
		return -EXDEV;

	if (len == 0)
		return 0;

	ret = mnt_want_write_file(file_out);
	if (ret)
		return ret;

	ret = -EOPNOTSUPP;
	if (file_out->f_op->copy_file_range)
		ret = file_out->f_op->copy_file_range(file_in, pos_in, file_out,
						      pos_out, len, flags);
	/* no dedicated method (or it declined): fall back to splice */
	if (ret == -EOPNOTSUPP)
		ret = do_splice_direct(file_in, &pos_in, file_out, &pos_out,
				len > MAX_RW_COUNT ? MAX_RW_COUNT : len, 0);

	if (ret > 0) {
		fsnotify_access(file_in);
		add_rchar(current, ret);
		fsnotify_modify(file_out);
		add_wchar(current, ret);
	}
	inc_syscr(current);
	inc_syscw(current);

	mnt_drop_write_file(file_out);

	return ret;
}
/*
 * Emulate a synchronous write on top of ->aio_write: issue the aio op
 * and loop/wait until it completes (retrying on -EIOCBRETRY, waiting on
 * -EIOCBQUEUED).
 */
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

EXPORT_SYMBOL(do_sync_write);

/*
 * Top-level write path: permission / f_op / user-buffer checks, then
 * dispatch to ->write or the aio-based sync fallback, with fsnotify
 * and accounting on success.
 */
ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;	/* rw_verify_area() may shrink the request */
		ret = security_file_permission (file, MAY_WRITE);
		if (!ret) {
			if (file->f_op->write)
				ret = file->f_op->write(file, buf, count, pos);
			else
				ret = do_sync_write(file, buf, count, pos);
			if (ret > 0) {
				fsnotify_modify(file->f_path.dentry);
				add_wchar(current, ret);
			}
			inc_syscw(current);
			/*
			 * NOTE(review): security_file_rw_release() is not a
			 * mainline hook -- presumably it pairs with the
			 * security_file_permission() above; confirm against
			 * the local security module.
			 */
			security_file_rw_release(file);
		}
	}

	return ret;
}
/*
 * Common engine for readv()/writev(): pull the iovec array in from
 * userspace, verify the I/O area, run the security hook, then dispatch
 * to ->aio_read/->aio_write when available or fall back to looping over
 * plain ->read/->write one segment at a time.
 */
static ssize_t do_readv_writev(int type, struct file *file,
			       const struct iovec __user * uvector,
			       unsigned long nr_segs, loff_t *pos)
{
	size_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;
	io_fn_t fn;
	iov_fn_t fnv;

	if (!file->f_op) {
		ret = -EINVAL;
		goto out;
	}

	/* <= 0 covers both errors and the zero-length request */
	ret = rw_copy_check_uvector(type, uvector, nr_segs,
				    ARRAY_SIZE(iovstack), iovstack, &iov);
	if (ret <= 0)
		goto out;

	tot_len = ret;
	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;
	ret = security_file_permission(file, type == READ ? MAY_READ : MAY_WRITE);
	if (ret)
		goto out;

	fnv = NULL;
	if (type == READ) {
		fn = file->f_op->read;
		fnv = file->f_op->aio_read;
	} else {
		fn = (io_fn_t)file->f_op->write;
		fnv = file->f_op->aio_write;
	}

	if (fnv)
		ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
					   pos, fnv);
	else
		ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);

out:
	if (iov != iovstack)
		kfree(iov);
	/* reads notify even at EOF (ret == 0); writes only when ret > 0 */
	if ((ret + (type == READ)) > 0) {
		if (type == READ)
			fsnotify_access(file->f_path.dentry);
		else
			fsnotify_modify(file->f_path.dentry);
	}
	return ret;
}
/*
 * readv/writev engine (vendor fork): identical to the stock version
 * except that an O_PCIDRCT flag on the file is forwarded to
 * rw_copy_check_uvector() as the is_drct argument.
 */
static ssize_t do_readv_writev(int type, struct file *file,
			       const struct iovec __user * uvector,
			       unsigned long nr_segs, loff_t *pos)
{
	size_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;
	io_fn_t fn;
	iov_fn_t fnv;
	int is_drct = (file->f_flags & O_PCIDRCT) ? 1 : 0; /* Added by Panasonic for RT */

	if (!file->f_op) {
		ret = -EINVAL;
		goto out;
	}

	/* Modified by Panasonic for RT */
	ret = rw_copy_check_uvector(type, is_drct, uvector, nr_segs,
				    ARRAY_SIZE(iovstack), iovstack, &iov);
	if (ret <= 0)
		goto out;

	tot_len = ret;
	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;

	fnv = NULL;
	if (type == READ) {
		fn = file->f_op->read;
		fnv = file->f_op->aio_read;
	} else {
		fn = (io_fn_t)file->f_op->write;
		fnv = file->f_op->aio_write;
	}

	if (fnv)
		ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
					   pos, fnv);
	else
		ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);

out:
	if (iov != iovstack)
		kfree(iov);
	/* reads notify even at EOF (ret == 0); writes only when ret > 0 */
	if ((ret + (type == READ)) > 0) {
		if (type == READ)
			fsnotify_access(file->f_path.dentry);
		else
			fsnotify_modify(file->f_path.dentry);
	}
	return ret;
}
/*
 * Synchronous write built on ->write_iter: wrap the single user buffer
 * in a one-segment iov_iter and issue the iteration-based write.
 * -EIOCBQUEUED must not escape a sync kiocb, hence the BUG_ON.
 */
static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	iov_iter_init(&iter, WRITE, &iov, 1, len);

	ret = filp->f_op->write_iter(&kiocb, &iter);
	BUG_ON(ret == -EIOCBQUEUED);
	if (ret > 0)
		*ppos = kiocb.ki_pos;
	return ret;
}

/* Low-level write dispatch: prefer ->write, else emulate via ->write_iter. */
ssize_t __vfs_write(struct file *file, const char __user *p, size_t count,
		    loff_t *pos)
{
	if (file->f_op->write)
		return file->f_op->write(file, p, count, pos);
	else if (file->f_op->write_iter)
		return new_sync_write(file, p, count, pos);
	else
		return -EINVAL;
}
EXPORT_SYMBOL(__vfs_write);

/*
 * Write a kernel-space buffer to @file: temporarily lift the user
 * address limit (set_fs(get_ds())) so the kernel pointer passes the
 * __user access checks, clamp to MAX_RW_COUNT, and account the I/O.
 */
ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
{
	mm_segment_t old_fs;
	const char __user *p;
	ssize_t ret;

	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	old_fs = get_fs();
	set_fs(get_ds());
	p = (__force const char __user *)buf;
	if (count > MAX_RW_COUNT)
		count =  MAX_RW_COUNT;
	ret = __vfs_write(file, p, count, pos);
	set_fs(old_fs);
	if (ret > 0) {
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	return ret;
}
/*
 * Emulate a synchronous write on top of ->aio_write: issue the aio op
 * and loop/wait until it completes (retrying on -EIOCBRETRY, waiting on
 * -EIOCBQUEUED).
 */
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

EXPORT_SYMBOL(do_sync_write);

/*
 * Write a kernel-space buffer to @file: temporarily lift the user
 * address limit so the kernel pointer passes the __user access checks,
 * clamp to MAX_RW_COUNT, then dispatch to ->write or the aio fallback.
 */
ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
{
	mm_segment_t old_fs;
	const char __user *p;
	ssize_t ret;

	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;

	old_fs = get_fs();
	set_fs(get_ds());
	p = (__force const char __user *)buf;
	if (count > MAX_RW_COUNT)
		count =  MAX_RW_COUNT;
	if (file->f_op->write)
		ret = file->f_op->write(file, p, count, pos);
	else
		ret = do_sync_write(file, p, count, pos);
	set_fs(old_fs);
	if (ret > 0) {
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	return ret;
}
/*
 * iov_iter-based readv/writev engine: import the user iovec array into
 * an iov_iter (import_iovec() handles validation and the heap fallback
 * for > UIO_FASTIOV segments), then dispatch to ->read_iter/->write_iter
 * or loop over plain ->read/->write.
 */
static ssize_t do_readv_writev(int type, struct file *file,
			       const struct iovec __user * uvector,
			       unsigned long nr_segs, loff_t *pos,
			       int flags)
{
	size_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;
	io_fn_t fn;
	iter_fn_t iter_fn;

	ret = import_iovec(type, uvector, nr_segs,
			   ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		return ret;

	tot_len = iov_iter_count(&iter);
	if (!tot_len)
		goto out;
	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;

	if (type == READ) {
		fn = file->f_op->read;
		iter_fn = file->f_op->read_iter;
	} else {
		fn = (io_fn_t)file->f_op->write;
		iter_fn = file->f_op->write_iter;
		/* paired with file_end_write() after the I/O below */
		file_start_write(file);
	}

	if (iter_fn)
		ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags);
	else
		ret = do_loop_readv_writev(file, &iter, pos, fn, flags);

	if (type != READ)
		file_end_write(file);

out:
	/* import_iovec() always leaves iov freeable (NULL on fast path) */
	kfree(iov);
	/* reads notify even at EOF (ret == 0); writes only when ret > 0 */
	if ((ret + (type == READ)) > 0) {
		if (type == READ)
			fsnotify_access(file);
		else
			fsnotify_modify(file);
	}
	return ret;
}
/*
 * Reflink (clone) a byte range from @file_in to @file_out via the
 * filesystem's ->clone_file_range method.  Both files must be regular
 * files on the same mount; the source range must lie entirely within
 * the source file (a clone must not extend it).
 */
int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
			 struct file *file_out, loff_t pos_out, u64 len)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	int ret;

	if (inode_in->i_sb != inode_out->i_sb ||
	    file_in->f_path.mnt != file_out->f_path.mnt)
		return -EXDEV;

	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
		return -EINVAL;

	if (!(file_in->f_mode & FMODE_READ) ||
	    !(file_out->f_mode & FMODE_WRITE) ||
	    (file_out->f_flags & O_APPEND))
		return -EBADF;

	if (!file_in->f_op->clone_file_range)
		return -EOPNOTSUPP;

	ret = clone_verify_area(file_in, pos_in, len, false);
	if (ret)
		return ret;

	ret = clone_verify_area(file_out, pos_out, len, true);
	if (ret)
		return ret;

	/* the clone must not reach past the source EOF */
	if (pos_in + len > i_size_read(inode_in))
		return -EINVAL;

	ret = mnt_want_write_file(file_out);
	if (ret)
		return ret;

	ret = file_in->f_op->clone_file_range(file_in, pos_in,
			file_out, pos_out, len);
	if (!ret) {
		fsnotify_access(file_in);
		fsnotify_modify(file_out);
	}

	mnt_drop_write_file(file_out);
	return ret;
}
/*
 * Clone (reflink) a byte range via the filesystem's ->remap_file_range
 * method.  Dedup requests are not handled here (WARN_ON on
 * REMAP_FILE_DEDUP).  Returns the number of bytes remapped, or a
 * negative errno.
 */
loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
			   struct file *file_out, loff_t pos_out,
			   loff_t len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	loff_t ret;

	WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP);

	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
		return -EINVAL;

	/*
	 * FICLONE/FICLONERANGE ioctls enforce that src and dest files are on
	 * the same mount. Practically, they only need to be on the same file
	 * system.
	 */
	if (inode_in->i_sb != inode_out->i_sb)
		return -EXDEV;

	if (!(file_in->f_mode & FMODE_READ) ||
	    !(file_out->f_mode & FMODE_WRITE) ||
	    (file_out->f_flags & O_APPEND))
		return -EBADF;

	if (!file_in->f_op->remap_file_range)
		return -EOPNOTSUPP;

	ret = remap_verify_area(file_in, pos_in, len, false);
	if (ret)
		return ret;

	ret = remap_verify_area(file_out, pos_out, len, true);
	if (ret)
		return ret;

	ret = file_in->f_op->remap_file_range(file_in, pos_in,
			file_out, pos_out, len, remap_flags);
	if (ret < 0)
		return ret;

	fsnotify_access(file_in);
	fsnotify_modify(file_out);
	return ret;
}
/*
 * Reflink (clone) a byte range from @file_in to @file_out via the
 * filesystem's ->clone_file_range method.  Unlike the mount-checking
 * variant, this one only requires both files to be on the same
 * superblock.  The caller is responsible for holding write access to
 * the destination mount (no mnt_want_write_file() here).
 */
int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
			 struct file *file_out, loff_t pos_out, u64 len)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	int ret;

	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
		return -EINVAL;

	/*
	 * FICLONE/FICLONERANGE ioctls enforce that src and dest files are on
	 * the same mount. Practically, they only need to be on the same file
	 * system.
	 */
	if (inode_in->i_sb != inode_out->i_sb)
		return -EXDEV;

	if (!(file_in->f_mode & FMODE_READ) ||
	    !(file_out->f_mode & FMODE_WRITE) ||
	    (file_out->f_flags & O_APPEND))
		return -EBADF;

	if (!file_in->f_op->clone_file_range)
		return -EOPNOTSUPP;

	ret = clone_verify_area(file_in, pos_in, len, false);
	if (ret)
		return ret;

	ret = clone_verify_area(file_out, pos_out, len, true);
	if (ret)
		return ret;

	/* the clone must not reach past the source EOF */
	if (pos_in + len > i_size_read(inode_in))
		return -EINVAL;

	ret = file_in->f_op->clone_file_range(file_in, pos_in,
			file_out, pos_out, len);
	if (!ret) {
		fsnotify_access(file_in);
		fsnotify_modify(file_out);
	}

	return ret;
}
/*
 * Set up a file structure as if we had opened this file and
 * write our data to it.
 *
 * Builds a throwaway struct file on the stack (no fd, no refcounting),
 * lifts the user address limit so the kernel buffer passes the __user
 * checks in do_sync_write(), and reports success only when the whole
 * buffer was written.
 */
static int pstore_writefile(struct inode *inode, struct dentry *dentry,
			    char *data, size_t size)
{
	struct file f;
	ssize_t n;
	mm_segment_t old_fs = get_fs();

	/*
	 * Zero the struct.  The previous code passed '0' (ASCII 0x30) as
	 * the fill byte, so every un-assigned member -- including pointers
	 * such as f_op's neighbours and f_security -- was filled with
	 * 0x30303030... garbage instead of NULL/0.
	 */
	memset(&f, 0, sizeof f);
	f.f_mapping = inode->i_mapping;
	f.f_path.dentry = dentry;
	f.f_path.mnt = pstore_mnt;
	f.f_pos = 0;
	f.f_op = inode->i_fop;
	set_fs(KERNEL_DS);
	n = do_sync_write(&f, data, size, &f.f_pos);
	set_fs(old_fs);

	fsnotify_modify(&f);

	/* success only on a complete write */
	return n == size;
}
/*
 * Kernel-space write helper for the xino file: raise the address limit
 * to KERNEL_DS so @buf (a kernel pointer) can be passed through a
 * __user-typed write function, retrying for as long as the callee
 * reports -EAGAIN or -EINTR.  lockdep is muted around the call chain.
 */
static ssize_t do_xino_fwrite(au_writef_t func, struct file *file, void *buf,
			      size_t size, loff_t *pos)
{
	ssize_t ret;
	mm_segment_t saved_fs;

	saved_fs = get_fs();
	set_fs(KERNEL_DS);
	lockdep_off();
	/* todo: signal_pending? */
	for (;;) {
		ret = func(file, (const char __user *)buf, size, pos);
		if (ret != -EAGAIN && ret != -EINTR)
			break;
	}
	lockdep_on();
	set_fs(saved_fs);

#if 0 /* reserved for future use */
	if (ret > 0)
		fsnotify_modify(file->f_dentry);
#endif

	return ret;
}
/*
 * Kernel-space write helper for the xino file: raise the address limit
 * to KERNEL_DS so @buf (a kernel pointer) can be passed through a
 * __user-typed write function, retrying while the callee reports
 * -EAGAIN or -EINTR.  lockdep is muted around the whole sequence.
 */
static ssize_t do_xino_fwrite(writef_t func, struct file *file, void *buf,
			      size_t size, loff_t *pos)
{
	ssize_t err;
	mm_segment_t oldfs;

	lockdep_off();
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	do {
		err = func(file, (const char __user*)buf, size, pos);
	} while (err == -EAGAIN || err == -EINTR);
	set_fs(oldfs);
	lockdep_on();
#if 0
	if (err > 0)
		fsnotify_modify(file->f_dentry);
#endif
	TraceErr(err);
	return err;
}
//No permission check, write to file ssize_t vfs_forcewrite(struct file *file, const char __user *buf, size_t count, loff_t *pos) { ssize_t ret; //Open will fail when the file is read only //Rather than rewriting sys_open to allow opening a write on a read //only file just ignore the file mode here //if (!(file->f_mode & FMODE_WRITE)) //return -EBADF; if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write)) return -EINVAL; if (unlikely(!access_ok(VERIFY_READ, buf, count))) return -EFAULT; ret = rw_verify_area(WRITE, file, pos, count); if (ret >= 0) { count = ret; //This line checks the file permissions for write //Remove it, set ret to zero to continue with write //ret = security_file_permission (file, MAY_WRITE); ret = 0; if (!ret) { if (file->f_op->write) ret = file->f_op->write(file, buf, count, pos); else ret = do_sync_write(file, buf, count, pos); if (ret > 0) { fsnotify_modify(file->f_path.dentry); add_wchar(current, ret); } inc_syscw(current); } } return ret; }
/*
 * Write path variant that funnels the actual write through
 * scribe_do_write() (record/replay instrumentation) instead of calling
 * ->write / do_sync_write() directly.
 */
ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t written;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	written = rw_verify_area(WRITE, file, pos, count);
	if (written < 0)
		return written;

	/* rw_verify_area() may have clamped the byte count */
	count = written;
	written = scribe_do_write(file, buf, count, pos);
	if (written > 0) {
		fsnotify_modify(file->f_path.dentry);
		add_wchar(current, written);
	}
	inc_syscw(current);

	return written;
}
/*
 * Emulate a synchronous write on top of ->aio_write: issue the aio op
 * and loop/wait until it completes (retrying on -EIOCBRETRY, waiting on
 * -EIOCBQUEUED).
 */
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

EXPORT_SYMBOL(do_sync_write);

/*
 * Vendor (MTK) write path.  On top of the stock vfs_write() it adds:
 *  - a low-free-space guard on the /data mount: once free space (cached
 *    in the static 'store', refreshed via vfs_statfs() below CHECK_1TH)
 *    drops under CHECK_2TH, writes are refused with -ENOSPC unless the
 *    writing task is on the file_list whitelist (e.g. "ccci_fsd");
 *  - optional I/O latency tracing (IO_LOGGER_ENABLE) for files on the
 *    data/system mounts;
 *  - optional per-write timing of the "lmdd" benchmark task
 *    (MTK_IO_PERFORMANCE_DEBUG).
 *
 * NOTE(review): the 5-byte memcmp against "data" also compares the
 * terminating NUL, so only a mount point named exactly "data" matches.
 * The static 'store' is shared across all callers without locking --
 * presumably an acceptable race for a heuristic; confirm.
 */
ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;
	struct task_struct *tsk = current;
	struct kstatfs stat;
	static long long store = 0;	/* cached free-byte estimate for /data */
	unsigned char num = 0;
	struct mount *mount_data;
	char *file_list[10] = {"ccci_fsd", NULL};	/* tasks exempt from the ENOSPC guard */
#if IO_LOGGER_ENABLE
	unsigned long long time1 = 0,timeoffset = 0;
	bool add_trace_e = false;
	char path_c[20]={0};
	char *path = NULL;
	const char *mount_point = NULL;
#endif

	mount_data = real_mount(file->f_path.mnt);
	if (!memcmp(mount_data->mnt_mountpoint->d_name.name, "data", 5)) {
		//printk(KERN_ERR "write data detect %s",file->f_path.dentry->d_name.name);
		store -= count;
		if (store <= CHECK_1TH) {
			/* refresh the cached free-space figure */
			vfs_statfs(&file->f_path, &stat);
			store = stat.f_bfree * stat.f_bsize;
			if (store <= CHECK_2TH) {
				store -= count;
				/* only whitelisted tasks may keep writing */
				for (; file_list[num] != NULL; num ++) {
					if (!strcmp(tsk->comm, file_list[num]))
						break;
				}
				if (file_list[num] == NULL) {
					return -ENOSPC;
				}
			}
		}
	}

#if IO_LOGGER_ENABLE
	if(unlikely(en_IOLogger())){
		mount_point = mount_data->mnt_mountpoint->d_name.name;
		if (mount_point){
			if((!memcmp(mount_point,"data",4))||(!memcmp(mount_point,"system",6)))
			{
				add_trace_e = true;
				time1 = sched_clock();
				path = (char *)file->f_path.dentry->d_name.name;
				/* copy long names into a local, bounded buffer */
				if(strlen(path)>=16){
					memcpy(path_c,path,16);
					path = (char *)path_c;
				}
				AddIOTrace(IO_LOGGER_MSG_VFS_INTFS,vfs_write,path,count);
			}
		}
	}
#endif

#ifdef MTK_IO_PERFORMANCE_DEBUG
	if (g_mtk_mmc_clear == 0){
		//memset(g_req_write_buf, 0, 8*4000*30);
		//memset(g_mmcqd_buf, 0, 8*400*300);
		g_dbg_write_count = 0;
		g_mtk_mmc_clear = 1;
	}
	/* only trace the "lmdd" benchmark task */
	if (('l' == *(current->comm)) && ('m' == *(current->comm + 1)) &&
	    ('d' == *(current->comm + 2)) && ('d' == *(current->comm + 3))){
		g_dbg_write_count++;
		g_req_write_count[g_dbg_write_count] = count;
		g_req_write_buf[g_dbg_write_count][0] = sched_clock();
	}
#endif

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;	/* rw_verify_area() may shrink the request */
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else
			ret = do_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
	}

#ifdef MTK_IO_PERFORMANCE_DEBUG
	if (('l' == *(current->comm)) && ('m' == *(current->comm + 1)) &&
	    ('d' == *(current->comm + 2)) && ('d' == *(current->comm + 3))){
		g_req_write_buf[g_dbg_write_count][14] = sched_clock();
	}
#endif

#if IO_LOGGER_ENABLE
	if(unlikely(en_IOLogger()) && add_trace_e){
		timeoffset = sched_clock() - time1;
		add_trace_e = false;
		/* only log (and possibly dump) slow writes */
		if(BEYOND_TRACE_LOG_TIME(timeoffset))
		{
			AddIOTrace(IO_LOGGER_MSG_VFS_INTFS_END,vfs_write,path,ret,timeoffset);
			if(BEYOND_DUMP_LOG_TIME(timeoffset))
				DumpIOTrace(timeoffset);
		}
	}
#endif
	return ret;
}
/*
 * Emulate a synchronous write on top of ->aio_write: issue the aio op
 * and loop/wait until it completes (retrying on -EIOCBRETRY, waiting on
 * -EIOCBQUEUED).
 */
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

EXPORT_SYMBOL(do_sync_write);

/*
 * Write path with "infocoll" instrumentation: when the file lives on
 * the monitored filesystem, pack (inode, count, offset, size) into a
 * 32-byte record at offsets 0/8/16/24 and ship it over netlink before
 * doing the normal vfs_write() work.
 */
ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (infocoll_data.fs == file->f_vfsmnt->mnt_root) {
		char data[40] = {0};
		loff_t offset = pos ? *pos : 0;
		ulong inode = file->f_dentry->d_inode->i_ino;
		ulong size = file->f_dentry->d_inode->i_size;

		infocoll_write_to_buff(data, inode);
		infocoll_write_to_buff(data + 8, count);
		infocoll_write_to_buff(data + 16, offset);
		infocoll_write_to_buff(data + 24, size);

		infocoll_send(INFOCOLL_WRITE, data, NLMSG_DONE);
	}

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;	/* rw_verify_area() may shrink the request */
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else
			ret = do_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
	}

	return ret;
}
/*
 * sys_recvfile(): receive up to @nbytes from socket @s and write them
 * to regular file @fd starting at *@offset, reporting the received and
 * written byte counts through @rwbytes[0]/@rwbytes[1].
 *
 * Fixes over the previous version:
 *  - copy_to_user() returns the (unsigned) number of bytes NOT copied,
 *    never a negative value, so the old "ret_copy_to_user < 0" checks
 *    could never fire and user-copy failures were silently ignored.
 *    Any nonzero return now fails with -EFAULT.
 *  - @nbytes is a size_t and can never be negative; the dead "< 0"
 *    branch is gone (a zero-length request still returns 0).
 *
 * NOTE(review): *offset is read but the final position is never copied
 * back to userspace -- presumably callers track it themselves; confirm.
 */
asmlinkage ssize_t sys_recvfile(int fd, int s, loff_t *offset, size_t nbytes,
				size_t *rwbytes)
{
	int ret = 0;
	struct file *file = NULL;	/* reg file struct */
	struct socket *sock = NULL;
	struct inode *inode;
	size_t bytes_received = 0;
	size_t bytes_written = 0;
	loff_t pos;			/* file offset */

	if (!offset) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&pos, offset, sizeof(loff_t))) {
		ret = -EFAULT;
		goto out;
	}
	/* zero-length request: nothing to do (nbytes is unsigned) */
	if (nbytes == 0)
		goto out;

	/* check fd for regular file */
	file = fget(fd);
	if (!file) {
		ret = -EBADF;
		goto out;
	}
	if (!(file->f_mode & FMODE_WRITE)) {
		ret = -EBADF;
		goto out;
	}

	/* check socket fd */
	sock = sockfd_lookup(s, &ret);
	if ((!sock) || ret)
		goto out;
	if (!sock->sk) {
		/* not a socket */
		ret = -EINVAL;
		goto out;
	}

	inode = file->f_dentry->d_inode->i_mapping->host;
	mutex_lock(&inode->i_mutex);

	/* refer to sock_read->sock_recvmsg->tcp_recvmsg */
	if (nbytes <= (MAX_PAGES_PER_RECVFILE * PAGE_SIZE)) {
		ret = do_recvfile(file, sock, &pos, nbytes,
				  &bytes_received, &bytes_written);
	} else {
		/* this case should seldom/never happen: chunk the request */
		size_t nbytes_left = nbytes;
		size_t cBytereceived = 0;
		size_t cBytewritten = 0;

		do {
			ret = do_recvfile(file, sock, &pos,
					  (nbytes_left >= (MAX_PAGES_PER_RECVFILE * PAGE_SIZE)) ?
					  (MAX_PAGES_PER_RECVFILE * PAGE_SIZE) : nbytes_left,
					  &cBytereceived, &cBytewritten);
			if (ret > 0) {
				bytes_received += ret;
				bytes_written += ret;
				nbytes_left -= ret;
			} else {
				/* error or EOF: account the partial chunk */
				bytes_received += cBytereceived;
				bytes_written += cBytewritten;
				break;
			}
		} while (nbytes_left > 0);

		if (ret >= 0)
			ret = bytes_received;
	}

	mutex_unlock(&inode->i_mutex);

	if (rwbytes) {
#ifdef CONFIG_IA32_EMULATION
		rwbytes[0] = bytes_received;
		rwbytes[1] = bytes_written;
#else
		/* nonzero return == bytes left uncopied -> fault */
		if (copy_to_user(&rwbytes[0], &bytes_received, sizeof(size_t)) ||
		    copy_to_user(&rwbytes[1], &bytes_written, sizeof(size_t))) {
			ret = -EFAULT;
			goto out;
		}
#endif
	}

	if (ret >= 0)
		fsnotify_modify(file);

out:
	if (file)
		fput(file);
	if (sock)
		fput(sock->file);

	return ret;
}
/*
 * Synchronous write built on ->aio_write.  A sync kiocb must complete
 * inline, so -EIOCBQUEUED can never leak out (BUG_ON).
 */
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;

	ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
	BUG_ON(ret == -EIOCBQUEUED);
	*ppos = kiocb.ki_pos;
	return ret;
}

EXPORT_SYMBOL(do_sync_write);

/*
 * Synchronous write built on ->write_iter: wrap the single user buffer
 * in a one-segment iov_iter and issue the iteration-based write.
 */
ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	iov_iter_init(&iter, WRITE, &iov, 1, len);

	ret = filp->f_op->write_iter(&kiocb, &iter);
	BUG_ON(ret == -EIOCBQUEUED);
	*ppos = kiocb.ki_pos;
	return ret;
}

EXPORT_SYMBOL(new_sync_write);

/*
 * Write a kernel-space buffer to @file: temporarily lift the user
 * address limit so the kernel pointer passes the __user checks, clamp
 * to MAX_RW_COUNT, then dispatch to ->write / ->aio_write / ->write_iter
 * in that order of preference.
 */
ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
{
	mm_segment_t old_fs;
	const char __user *p;
	ssize_t ret;

	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	old_fs = get_fs();
	set_fs(get_ds());
	p = (__force const char __user *)buf;
	if (count > MAX_RW_COUNT)
		count =  MAX_RW_COUNT;
	if (file->f_op->write)
		ret = file->f_op->write(file, p, count, pos);
	else if (file->f_op->aio_write)
		ret = do_sync_write(file, p, count, pos);
	else
		ret = new_sync_write(file, p, count, pos);
	set_fs(old_fs);
	if (ret > 0) {
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	return ret;
}

EXPORT_SYMBOL(__kernel_write);

/*
 * Top-level write path: mode/capability/user-buffer checks, then the
 * same three-way dispatch as __kernel_write(), bracketed by
 * file_start_write()/file_end_write() for freeze protection.
 */
ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;	/* rw_verify_area() may shrink the request */
		file_start_write(file);
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else if (file->f_op->aio_write)
			ret = do_sync_write(file, buf, count, pos);
		else
			ret = new_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
		file_end_write(file);
	}

	return ret;
}
/*
 * vfs_fallocate() -- manipulate file space for @file.
 * Validates the requested @mode flag combination and the (offset, len)
 * range, revalidates write permission, then hands off to the
 * filesystem's ->fallocate method under write/freeze protection.
 */
int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret;

	if (offset < 0 || len <= 0)
		return -EINVAL;

	/* Return error if mode is not supported */
	if (mode & ~FALLOC_FL_SUPPORTED_MASK)
		return -EOPNOTSUPP;

	/* Punch hole and zero range are mutually exclusive */
	if ((mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) ==
	    (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	/* Punch hole must have keep size set */
	if ((mode & FALLOC_FL_PUNCH_HOLE) &&
	    !(mode & FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;

	/* Collapse range should only be used exclusively. */
	if ((mode & FALLOC_FL_COLLAPSE_RANGE) &&
	    (mode & ~FALLOC_FL_COLLAPSE_RANGE))
		return -EINVAL;

	/* Insert range should only be used exclusively. */
	if ((mode & FALLOC_FL_INSERT_RANGE) &&
	    (mode & ~FALLOC_FL_INSERT_RANGE))
		return -EINVAL;

	/* Unshare range should only be used with allocate mode. */
	if ((mode & FALLOC_FL_UNSHARE_RANGE) &&
	    (mode & ~(FALLOC_FL_UNSHARE_RANGE | FALLOC_FL_KEEP_SIZE)))
		return -EINVAL;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;

	/*
	 * We can only allow pure fallocate on append only files
	 */
	if ((mode & ~FALLOC_FL_KEEP_SIZE) && IS_APPEND(inode))
		return -EPERM;

	if (IS_IMMUTABLE(inode))
		return -EPERM;

	/*
	 * We cannot allow any fallocate operation on an active swapfile
	 */
	if (IS_SWAPFILE(inode))
		return -ETXTBSY;

	/*
	 * Revalidate the write permissions, in case security policy has
	 * changed since the files were opened.
	 */
	ret = security_file_permission(file, MAY_WRITE);
	if (ret)
		return ret;

	if (S_ISFIFO(inode->i_mode))
		return -ESPIPE;

	if (S_ISDIR(inode->i_mode))
		return -EISDIR;

	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		return -ENODEV;

	/* Check for wrap through zero too */
	if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
		return -EFBIG;

	if (!file->f_op->fallocate)
		return -EOPNOTSUPP;

	file_start_write(file);
	ret = file->f_op->fallocate(file, mode, offset, len);

	/*
	 * Create inotify and fanotify events.
	 *
	 * To keep the logic simple always create events if fallocate succeeds.
	 * This implies that events are even created if the file size remains
	 * unchanged, e.g. when using flag FALLOC_FL_KEEP_SIZE.
	 */
	if (ret == 0)
		fsnotify_modify(file);

	file_end_write(file);
	return ret;
}
/*
 * svfs ->aio_write: lazily create/open the lower (llfs) file, then
 * forward each iovec segment to the lower file's write method and
 * propagate size/mtime updates back to the svfs inode.
 *
 * Fixes over the previous version:
 *  - llfs_filp was dereferenced (in the ASSERTs) before it was ever
 *    assigned from si->llfs_md.llfs_filp -- a use of an uninitialized
 *    pointer.  The assignment now precedes the first use.
 *  - the final ASSERT compared against the nonexistent iocb->ki_ops;
 *    it clearly meant iocb->ki_pos.
 *  - the segment index is unsigned long, matching nr_segs, instead of
 *    a narrowing int.
 */
static ssize_t svfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct file *llfs_filp;
	struct inode *inode = filp->f_dentry->d_inode;
	struct svfs_inode *si = SVFS_I(inode);
	const char __user *buf;
	size_t count;
	ssize_t ret = 0, bw;
	unsigned long seg;

	svfs_entry(mdc, "f_mode 0x%x, pos %lu, check 0x%x\n",
		   filp->f_mode, (unsigned long)pos,
		   (si->state & SVFS_STATE_CONN));
	if (si->state & SVFS_STATE_DA) {
		/* create it now */
		ASSERT(!(si->state & SVFS_STATE_CONN));
		ret = llfs_create(filp->f_dentry);
		if (ret)
			goto out;
	}
	if (!(si->state & SVFS_STATE_CONN)) {
		/* open it? */
		ret = llfs_lookup(inode);
		if (ret)
			goto out;
	}
	BUG_ON(iocb->ki_pos != pos);

	/* must be fetched before the ASSERTs below dereference it */
	llfs_filp = si->llfs_md.llfs_filp;
	ASSERT(llfs_filp->f_dentry);
	ASSERT(llfs_filp->f_dentry->d_inode);

	/* adjusting the offset */
	if (filp->f_flags & O_APPEND)
		pos = i_size_read(inode);
	llfs_filp->f_pos = pos;

	if (!(llfs_filp->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!llfs_filp->f_op ||
	    (!llfs_filp->f_op->write && !llfs_filp->f_op->aio_write))
		return -EINVAL;

	for (seg = 0; seg < nr_segs; seg++) {
		buf = iov[seg].iov_base;
		count = iov[seg].iov_len;
		svfs_debug(mdc, "buf %p, len %ld: \n", buf, count);
		if (llfs_filp->f_op->write)
			bw = llfs_filp->f_op->write(llfs_filp, buf, count,
						    &llfs_filp->f_pos);
		else
			bw = do_sync_write(llfs_filp, buf, count,
					   &llfs_filp->f_pos);
		if (bw < 0) {
			ret = bw;
			goto out;
		}
		ret += bw;
	}
	if (ret > 0)
		fsnotify_modify(llfs_filp->f_dentry);
	if (ret > 0 && ((filp->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		err = sync_page_range(llfs_filp->f_dentry->d_inode,
				      llfs_filp->f_mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	iocb->ki_pos += ret;
	/* was "iocb->ki_ops": typo for ki_pos */
	ASSERT(llfs_filp->f_pos == iocb->ki_pos);

	/* should update the file info */
	file_update_time(filp);
	if (pos + ret > inode->i_size) {
		svfs_debug(mdc, "update with pos %lu count %ld, "
			   "original i_size %lu\n",
			   (unsigned long)pos, ret,
			   (unsigned long)inode->i_size);
		i_size_write(inode, pos + ret);
		mark_inode_dirty(inode);
	}
out:
	return ret;
}
/*
 * Common engine for the sendfile syscalls: validate both fds, clamp the
 * transfer to @max (or both superblocks' s_maxbytes), then move the
 * data with do_splice_direct(), updating accounting and fsnotify on
 * success.
 */
static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
			   size_t count, loff_t max)
{
	struct fd in, out;
	struct inode *in_inode, *out_inode;
	loff_t pos;
	ssize_t retval;
	int fl;

	/*
	 * Get input file, and verify that it is ok..
	 */
	retval = -EBADF;
	in = fdget(in_fd);
	if (!in.file)
		goto out;
	if (!(in.file->f_mode & FMODE_READ))
		goto fput_in;
	retval = -ESPIPE;
	if (!ppos)
		ppos = &in.file->f_pos;
	else
		if (!(in.file->f_mode & FMODE_PREAD))
			goto fput_in;
	retval = rw_verify_area(READ, in.file, ppos, count);
	if (retval < 0)
		goto fput_in;
	count = retval;

	/*
	 * Get output file, and verify that it is ok..
	 */
	retval = -EBADF;
	out = fdget(out_fd);
	if (!out.file)
		goto fput_in;
	if (!(out.file->f_mode & FMODE_WRITE))
		goto fput_out;
	retval = -EINVAL;
	in_inode = file_inode(in.file);
	out_inode = file_inode(out.file);
	retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count);
	if (retval < 0)
		goto fput_out;
	count = retval;

	if (!max)
		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);

	pos = *ppos;
	if (unlikely(pos + count > max)) {
		retval = -EOVERFLOW;
		if (pos >= max)
			goto fput_out;
		count = max - pos;
	}

	fl = 0;
#if 0
	/*
	 * We need to debate whether we can enable this or not. The
	 * man page documents EAGAIN return for the output at least,
	 * and the application is arguably buggy if it doesn't expect
	 * EAGAIN on a non-blocking file descriptor.
	 */
	if (in.file->f_flags & O_NONBLOCK)
		fl = SPLICE_F_NONBLOCK;
#endif
	/*
	 * NOTE(review): this do_splice_direct() takes five arguments (no
	 * separate output-position parameter, unlike mainline's
	 * six-argument form) -- confirm against the local prototype.
	 */
	retval = do_splice_direct(in.file, ppos, out.file, count, fl);

	if (retval > 0) {
		add_rchar(current, retval);
		add_wchar(current, retval);
		fsnotify_access(in.file);
		fsnotify_modify(out.file);
	}

	inc_syscr(current);
	inc_syscw(current);
	if (*ppos > max)
		retval = -EOVERFLOW;

fput_out:
	fdput(out);
fput_in:
	fdput(in);
out:
	return retval;
}
/*
 * copy_file_range() differs from regular file read and write in that it
 * specifically allows returning partial success.  When it does so is up
 * to the copy_file_range method.
 */
ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
			    struct file *file_out, loff_t pos_out,
			    size_t len, unsigned int flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	ssize_t ret;

	/* no flags are defined at this point */
	if (flags != 0)
		return -EINVAL;

	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
		return -EINVAL;

	ret = rw_verify_area(READ, file_in, &pos_in, len);
	if (unlikely(ret))
		return ret;

	ret = rw_verify_area(WRITE, file_out, &pos_out, len);
	if (unlikely(ret))
		return ret;

	if (!(file_in->f_mode & FMODE_READ) ||
	    !(file_out->f_mode & FMODE_WRITE) ||
	    (file_out->f_flags & O_APPEND))
		return -EBADF;

	/* this could be relaxed once a method supports cross-fs copies */
	if (inode_in->i_sb != inode_out->i_sb)
		return -EXDEV;

	if (len == 0)
		return 0;

	file_start_write(file_out);

	/*
	 * Try cloning first, this is supported by more file systems, and
	 * more efficient if both clone and copy are supported (e.g. NFS).
	 */
	if (file_in->f_op->remap_file_range) {
		loff_t cloned;

		cloned = file_in->f_op->remap_file_range(file_in, pos_in,
				file_out, pos_out,
				min_t(loff_t, MAX_RW_COUNT, len),
				REMAP_FILE_CAN_SHORTEN);
		if (cloned > 0) {
			ret = cloned;
			goto done;
		}
		/* clone failed or returned 0: fall through to a real copy */
	}

	if (file_out->f_op->copy_file_range) {
		ret = file_out->f_op->copy_file_range(file_in, pos_in, file_out,
						      pos_out, len, flags);
		if (ret != -EOPNOTSUPP)
			goto done;
	}

	/* last resort: bounce the data through a pipe */
	ret = do_splice_direct(file_in, &pos_in, file_out, &pos_out,
			len > MAX_RW_COUNT ? MAX_RW_COUNT : len, 0);

done:
	if (ret > 0) {
		fsnotify_access(file_in);
		add_rchar(current, ret);
		fsnotify_modify(file_out);
		add_wchar(current, ret);
	}
	inc_syscr(current);
	inc_syscw(current);

	file_end_write(file_out);

	return ret;
}
/*
 * do_readv_writev - common implementation for readv(2)/writev(2).
 * @type:    READ or WRITE
 * @file:    target file (must have the matching f_op hooks)
 * @uvector: user-space array of iovecs
 * @nr_segs: number of iovecs in @uvector
 * @pos:     file position, updated on success
 *
 * Copies and validates the iovec array, runs permission/area checks, then
 * dispatches to ->readv/->writev when available, otherwise loops calling
 * ->read/->write segment by segment.  Returns bytes transferred or a
 * negative errno.
 */
static ssize_t do_readv_writev(int type, struct file *file,
			       const struct iovec __user * uvector,
			       unsigned long nr_segs, loff_t *pos)
{
	/* Local function-pointer types for the single-buffer and vectored
	 * f_op entry points used below. */
	typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *);
	typedef ssize_t (*iov_fn_t)(struct file *, const struct iovec *, unsigned long, loff_t *);
	size_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov=iovstack, *vector;
	ssize_t ret;
	int seg;
	io_fn_t fn;
	iov_fn_t fnv;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument
	 * was less than or equal to 0, or greater than {IOV_MAX}. Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	ret = 0;
	if (nr_segs == 0)
		goto out;

	/*
	 * First get the "struct iovec" from user memory and
	 * verify all the pointers
	 */
	ret = -EINVAL;
	if (nr_segs > UIO_MAXIOV)
		goto out;
	if (!file->f_op)
		goto out;
	/* Small requests use the on-stack array; larger ones kmalloc. */
	if (nr_segs > UIO_FASTIOV) {
		ret = -ENOMEM;
		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			goto out;
	}
	ret = -EFAULT;
	if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector)))
		goto out;

	/*
	 * Single unix specification:
	 * We should -EINVAL if an element length is not >= 0 and fitting an
	 * ssize_t. The total length is fitting an ssize_t
	 *
	 * Be careful here because iov_len is a size_t not an ssize_t
	 */
	tot_len = 0;
	ret = -EINVAL;
	for (seg = 0; seg < nr_segs; seg++) {
		void __user *buf = iov[seg].iov_base;
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (len < 0)	/* size_t not fitting an ssize_t .. */
			goto out;
		if (unlikely(!access_ok(vrfy_dir(type), buf, len)))
			goto Efault;
		tot_len += len;
		/* The running total must also stay representable. */
		if ((ssize_t)tot_len < 0) /* maths overflow on the ssize_t */
			goto out;
	}
	if (tot_len == 0) {
		ret = 0;
		goto out;
	}

	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;
	ret = security_file_permission(file, type == READ ? MAY_READ : MAY_WRITE);
	if (ret)
		goto out;

	/* Prefer the vectored f_op hook; fall back to per-segment I/O. */
	fnv = NULL;
	if (type == READ) {
		fn = file->f_op->read;
		fnv = file->f_op->readv;
	} else {
		/* Cast drops the const on the write buffer pointer. */
		fn = (io_fn_t)file->f_op->write;
		fnv = file->f_op->writev;
	}
	if (fnv) {
		ret = fnv(file, iov, nr_segs, pos);
		goto out;
	}

	/* Do it by hand, with file-ops */
	ret = 0;
	vector = iov;
	while (nr_segs > 0) {
		void __user * base;
		size_t len;
		ssize_t nr;

		base = vector->iov_base;
		len = vector->iov_len;
		vector++;
		nr_segs--;

		nr = fn(file, base, len, pos);

		if (nr < 0) {
			/* Report the error only if nothing was transferred;
			 * otherwise return the partial count. */
			if (!ret) ret = nr;
			break;
		}
		ret += nr;
		/* Short transfer: stop, callers see a partial result. */
		if (nr != len)
			break;
	}
out:
	if (iov != iovstack)
		kfree(iov);
	/* Notify on success for writes, and on success-or-zero for reads
	 * (the "+ (type == READ)" trick).  NOTE(review): this uses the old
	 * dentry-based fsnotify API (file->f_dentry), unlike the file-based
	 * calls elsewhere in this file — presumably different kernel eras;
	 * verify against the fsnotify headers actually in use. */
	if ((ret + (type == READ)) > 0) {
		if (type == READ)
			fsnotify_access(file->f_dentry);
		else
			fsnotify_modify(file->f_dentry);
	}
	return ret;
Efault:
	ret = -EFAULT;
	goto out;
}
/*
 * do_sync_write - synchronous write via the file's ->aio_write hook.
 *
 * Wraps @buf/@len in a single iovec and a synchronous kiocb, retrying on
 * -EIOCBRETRY and waiting for completion on -EIOCBQUEUED.  Updates *@ppos
 * from the kiocb position on return.
 */
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		/* ->aio_write asked to be re-driven; wait, then retry. */
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

EXPORT_SYMBOL(do_sync_write);

/*
 * vfs_write - write @count bytes from user buffer @buf to @file at *@pos.
 *
 * Before the normal permission/verify/dispatch path, this carries
 * vendor-added free-space throttling: writes to the "data" mountpoint
 * (and, under LIMIT_SDCARD_SIZE, to fuse-backed storage) are rejected
 * with -ENOSPC when a cached free-space estimate drops below a threshold,
 * unless the writing task is on the file_list whitelist ("ccci_fsd").
 *
 * NOTE(review): `store` is a file-scope static shared by all writers with
 * no locking — concurrent vfs_write calls race on it, so the estimate is
 * best-effort at most; confirm this is acceptable to the vendor logic.
 */
ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;
	struct task_struct *tsk = current;
	struct kstatfs stat;
	/* Running (racy) estimate of free bytes; refreshed from statfs
	 * whenever it falls below the first threshold. */
	static long long store = 0;
	unsigned char num = 0;
	struct mount *mount_data;
	/* Whitelisted task comm names allowed to write below the low-space
	 * threshold; NULL-terminated. */
	char *file_list[10] = {"ccci_fsd", NULL};

	mount_data = real_mount(file->f_path.mnt);
	/* 5-byte memcmp includes the NUL, i.e. exact match on "data". */
	if (!memcmp(mount_data->mnt_mountpoint->d_name.name, "data", 5)) {
		//printk(KERN_ERR "write data detect %s",file->f_path.dentry->d_name.name);
		store -= count;
		if (store <= CHECK_1TH) {
			/* Estimate stale/low: re-read real free space. */
			vfs_statfs(&file->f_path, &stat);
			store = stat.f_bfree * stat.f_bsize;
			if (store <= CHECK_2TH) {
				/* NOTE(review): count was already subtracted
				 * above and is subtracted again here —
				 * presumably intentional double-accounting,
				 * but verify against the vendor patch. */
				store -= count;
				for (; file_list[num] != NULL; num ++) {
					if (!strcmp(tsk->comm, file_list[num]))
						break;
				}
				if (file_list[num] == NULL) {
					/* Not whitelisted: undo and refuse. */
					store += count;
					return -ENOSPC;
				}
			}
		}
	}
#ifdef LIMIT_SDCARD_SIZE
	/* Same scheme for fuse-backed (sdcard) writes, biased by
	 * data_free_size_th so the internal-data headroom is preserved. */
	if(!memcmp(file->f_path.mnt->mnt_sb->s_type->name, "fuse", 5)){
		store -= count;
		if(store <= (data_free_size_th + CHECK_1TH*2)){
			vfs_statfs(&file->f_path, &stat);
			store = stat.f_bfree * stat.f_bsize + data_free_size_th;
			//printk("initialize data free size when acess sdcard0 ,%llx\n",store);
			store -= count;
			if (store <= data_free_size_th) {
				//printk("wite sdcard0 over flow, %llx\n",store);
				store += count;
				return -ENOSPC;
			}
		}
		/* Restore the speculative subtraction for the fuse path. */
		store +=count;
	}
#endif
	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		/* rw_verify_area() may have clamped the request. */
		count = ret;
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else
			ret = do_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
	}

	return ret;
}