ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;
		file_start_write(file);
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else
			ret = do_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
		file_end_write(file);
	}

	return ret;
}
SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	if (pos < 0)
		return -EINVAL;

	if (scribe_track_next_file_write_interruptible())
		return -ENOMEM;

	file = fget_light(fd, &fput_needed);
	if (file) {
		ret = -ESPIPE;
		if (file->f_mode & FMODE_PWRITE)
			ret = vfs_writev(file, vec, vlen, &pos);
		fput_light(file, fput_needed);
	} else if (scribe_was_file_locking_interrupted())
		ret = -ERESTARTSYS;

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;
	if (unlikely(!access_ok(buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (!ret) {
		if (count > MAX_RW_COUNT)
			count = MAX_RW_COUNT;
		file_start_write(file);
		ret = __vfs_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
		file_end_write(file);
	}

	return ret;
}
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(do_sync_write);

ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;
	struct super_block *sb = file->f_path.dentry->d_sb;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else
			ret = do_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
	}

	if (sb && (!strcmp(sb->s_type->name, "ext4") ||
		   !strcmp(sb->s_type->name, "fuse") ||
		   !strcmp(sb->s_type->name, "vfat")))
		print_io_dump(WRITE, count);

	return ret;
}
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(do_sync_write);

ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;
		ret = security_file_permission(file, MAY_WRITE);
		if (!ret) {
			if (file->f_op->write)
				ret = file->f_op->write(file, buf, count, pos);
			else
				ret = do_sync_write(file, buf, count, pos);
			if (ret > 0) {
				fsnotify_modify(file->f_path.dentry);
				add_wchar(current, ret);
			}
			inc_syscw(current);
			security_file_rw_release(file);
		}
	}

	return ret;
}
/*
 * copy_file_range() differs from regular file read and write in that it
 * specifically allows returning partial success.  When it does so is up to
 * the copy_file_range method.
 */
ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
			    struct file *file_out, loff_t pos_out,
			    size_t len, unsigned int flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	ssize_t ret;

	if (flags != 0)
		return -EINVAL;

	ret = rw_verify_area(READ, file_in, &pos_in, len);
	if (unlikely(ret))
		return ret;

	ret = rw_verify_area(WRITE, file_out, &pos_out, len);
	if (unlikely(ret))
		return ret;

	if (!(file_in->f_mode & FMODE_READ) ||
	    !(file_out->f_mode & FMODE_WRITE) ||
	    (file_out->f_flags & O_APPEND))
		return -EBADF;

	/* this could be relaxed once a method supports cross-fs copies */
	if (inode_in->i_sb != inode_out->i_sb)
		return -EXDEV;

	if (len == 0)
		return 0;

	ret = mnt_want_write_file(file_out);
	if (ret)
		return ret;

	ret = -EOPNOTSUPP;
	if (file_out->f_op->copy_file_range)
		ret = file_out->f_op->copy_file_range(file_in, pos_in, file_out,
						      pos_out, len, flags);
	if (ret == -EOPNOTSUPP)
		ret = do_splice_direct(file_in, &pos_in, file_out, &pos_out,
				       len > MAX_RW_COUNT ? MAX_RW_COUNT : len, 0);

	if (ret > 0) {
		fsnotify_access(file_in);
		add_rchar(current, ret);
		fsnotify_modify(file_out);
		add_wchar(current, ret);
	}
	inc_syscr(current);
	inc_syscw(current);

	mnt_drop_write_file(file_out);

	return ret;
}
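Because vfs_copy_file_range() is explicitly allowed to copy fewer bytes than requested, user-space callers of the copy_file_range(2) system call are expected to loop until the whole length has been transferred. The following is a minimal user-space sketch of that pattern, assuming the glibc wrapper (available since glibc 2.27) and hypothetical file names "src.dat" and "dst.dat"; it is an illustration, not part of the kernel code above.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

/* Copy a whole file by repeatedly calling copy_file_range(2),
 * treating partial success as normal and looping for the remainder. */
int main(void)
{
	int fd_in = open("src.dat", O_RDONLY);		/* hypothetical input file */
	int fd_out = open("dst.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	struct stat st;
	size_t remaining;

	if (fd_in < 0 || fd_out < 0 || fstat(fd_in, &st) < 0) {
		perror("setup");
		return EXIT_FAILURE;
	}

	remaining = st.st_size;
	while (remaining > 0) {
		/* NULL offsets: the kernel advances both file positions. */
		ssize_t copied = copy_file_range(fd_in, NULL, fd_out, NULL, remaining, 0);

		if (copied < 0) {
			perror("copy_file_range");
			return EXIT_FAILURE;
		}
		if (copied == 0)	/* unexpected EOF on the source */
			break;
		remaining -= copied;	/* partial success: keep going */
	}

	close(fd_in);
	close(fd_out);
	return EXIT_SUCCESS;
}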
static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	iov_iter_init(&iter, WRITE, &iov, 1, len);

	ret = filp->f_op->write_iter(&kiocb, &iter);
	BUG_ON(ret == -EIOCBQUEUED);
	if (ret > 0)
		*ppos = kiocb.ki_pos;
	return ret;
}

ssize_t __vfs_write(struct file *file, const char __user *p, size_t count, loff_t *pos)
{
	if (file->f_op->write)
		return file->f_op->write(file, p, count, pos);
	else if (file->f_op->write_iter)
		return new_sync_write(file, p, count, pos);
	else
		return -EINVAL;
}
EXPORT_SYMBOL(__vfs_write);

ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
{
	mm_segment_t old_fs;
	const char __user *p;
	ssize_t ret;

	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	old_fs = get_fs();
	set_fs(get_ds());
	p = (__force const char __user *)buf;
	if (count > MAX_RW_COUNT)
		count = MAX_RW_COUNT;
	ret = __vfs_write(file, p, count, pos);
	set_fs(old_fs);
	if (ret > 0) {
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	return ret;
}
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(do_sync_write);

ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
{
	mm_segment_t old_fs;
	const char __user *p;
	ssize_t ret;

	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;

	old_fs = get_fs();
	set_fs(get_ds());
	p = (__force const char __user *)buf;
	if (count > MAX_RW_COUNT)
		count = MAX_RW_COUNT;
	if (file->f_op->write)
		ret = file->f_op->write(file, p, count, pos);
	else
		ret = do_sync_write(file, p, count, pos);
	set_fs(old_fs);
	if (ret > 0) {
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	return ret;
}
SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	struct fd f = fdget(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_writev(f.file, vec, vlen, &pos);
		file_pos_write(f.file, pos);
		fdput(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
static ssize_t do_writev(unsigned long fd, const struct iovec __user *vec,
			 unsigned long vlen, int flags)
{
	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (f.file) {
		loff_t pos = file_pos_read(f.file);
		ret = vfs_writev(f.file, vec, vlen, &pos, flags);
		if (ret >= 0)
			file_pos_write(f.file, pos);
		fdput_pos(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	file = fget_light(fd, &fput_needed);
	if (file) {
		loff_t pos = file_pos_read(file);
		ret = vfs_writev(file, vec, vlen, &pos);
		file_pos_write(file, pos);
		fput_light(file, fput_needed);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
static size_t compat_writev(struct file *file, const struct compat_iovec __user *vec,
			    unsigned long vlen, loff_t *pos, rwf_t flags)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = compat_import_iovec(WRITE, vec, vlen, UIO_FASTIOV, &iov, &iter);
	if (ret >= 0) {
		file_start_write(file);
		ret = do_iter_write(file, &iter, pos, flags);
		file_end_write(file);
		kfree(iov);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
static size_t compat_writev(struct file *file, const struct compat_iovec __user *vec,
			    unsigned long vlen, loff_t *pos)
{
	ssize_t ret = -EBADF;

	if (!(file->f_mode & FMODE_WRITE))
		goto out;

	ret = -EINVAL;
	if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
		goto out;

	ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos);

out:
	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
static ssize_t do_pwritev(unsigned long fd, const struct iovec __user *vec,
			  unsigned long vlen, loff_t pos, int flags)
{
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PWRITE)
			ret = vfs_writev(f.file, vec, vlen, &pos, flags);
		fdput(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
SYSCALL_DEFINE5(pwritev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
	loff_t pos = pos_from_hilo(pos_h, pos_l);
	struct fd f;
	ssize_t ret = -EBADF;

	if (pos < 0)
		return -EINVAL;

	f = fdget(fd);
	if (f.file) {
		ret = -ESPIPE;
		if (f.file->f_mode & FMODE_PWRITE)
			ret = vfs_writev(f.file, vec, vlen, &pos);
		fdput(f);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
/* No permission check: write to the file regardless of its mode. */
ssize_t vfs_forcewrite(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	/*
	 * Open will fail when the file is read-only.  Rather than rewriting
	 * sys_open to allow opening a read-only file for writing, just ignore
	 * the file mode here:
	 *
	 * if (!(file->f_mode & FMODE_WRITE))
	 *	return -EBADF;
	 */
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;
		/*
		 * security_file_permission(file, MAY_WRITE) would normally be
		 * checked here; skip it and set ret to zero so the write
		 * always proceeds.
		 */
		ret = 0;
		if (!ret) {
			if (file->f_op->write)
				ret = file->f_op->write(file, buf, count, pos);
			else
				ret = do_sync_write(file, buf, count, pos);
			if (ret > 0) {
				fsnotify_modify(file->f_path.dentry);
				add_wchar(current, ret);
			}
			inc_syscw(current);
		}
	}

	return ret;
}
SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
		unsigned long, vlen)
{
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	if (scribe_track_next_file_write_interruptible())
		return -ENOMEM;

	file = fget_light(fd, &fput_needed);
	if (file) {
		loff_t pos = file_pos_read(file);
		ret = vfs_writev(file, vec, vlen, &pos);
		file_pos_write(file, pos);
		fput_light(file, fput_needed);
	} else if (scribe_was_file_locking_interrupted())
		ret = -ERESTARTSYS;

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}
/*
 * copy_file_range() differs from regular file read and write in that it
 * specifically allows returning partial success.  When it does so is up to
 * the copy_file_range method.
 */
ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
			    struct file *file_out, loff_t pos_out,
			    size_t len, unsigned int flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	ssize_t ret;

	if (flags != 0)
		return -EINVAL;

	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
		return -EINVAL;

	ret = rw_verify_area(READ, file_in, &pos_in, len);
	if (unlikely(ret))
		return ret;

	ret = rw_verify_area(WRITE, file_out, &pos_out, len);
	if (unlikely(ret))
		return ret;

	if (!(file_in->f_mode & FMODE_READ) ||
	    !(file_out->f_mode & FMODE_WRITE) ||
	    (file_out->f_flags & O_APPEND))
		return -EBADF;

	/* this could be relaxed once a method supports cross-fs copies */
	if (inode_in->i_sb != inode_out->i_sb)
		return -EXDEV;

	if (len == 0)
		return 0;

	file_start_write(file_out);

	/*
	 * Try cloning first, this is supported by more file systems, and
	 * more efficient if both clone and copy are supported (e.g. NFS).
	 */
	if (file_in->f_op->remap_file_range) {
		loff_t cloned;

		cloned = file_in->f_op->remap_file_range(file_in, pos_in,
				file_out, pos_out,
				min_t(loff_t, MAX_RW_COUNT, len),
				REMAP_FILE_CAN_SHORTEN);
		if (cloned > 0) {
			ret = cloned;
			goto done;
		}
	}

	if (file_out->f_op->copy_file_range) {
		ret = file_out->f_op->copy_file_range(file_in, pos_in, file_out,
						      pos_out, len, flags);
		if (ret != -EOPNOTSUPP)
			goto done;
	}

	ret = do_splice_direct(file_in, &pos_in, file_out, &pos_out,
			       len > MAX_RW_COUNT ? MAX_RW_COUNT : len, 0);

done:
	if (ret > 0) {
		fsnotify_access(file_in);
		add_rchar(current, ret);
		fsnotify_modify(file_out);
		add_wchar(current, ret);
	}
	inc_syscr(current);
	inc_syscw(current);

	file_end_write(file_out);

	return ret;
}
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(do_sync_write);

ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (infocoll_data.fs == file->f_vfsmnt->mnt_root) {
		char data[40] = {0};
		loff_t offset = pos ? *pos : 0;
		ulong inode = file->f_dentry->d_inode->i_ino;
		ulong size = file->f_dentry->d_inode->i_size;

		infocoll_write_to_buff(data, inode);
		infocoll_write_to_buff(data + 8, count);
		infocoll_write_to_buff(data + 16, offset);
		infocoll_write_to_buff(data + 24, size);

		infocoll_send(INFOCOLL_WRITE, data, NLMSG_DONE);
	}

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else
			ret = do_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
	}

	return ret;
}
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(do_sync_write);

ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;
	struct task_struct *tsk = current;
	struct kstatfs stat;
	static long long store = 0;
	unsigned char num = 0;
	struct mount *mount_data;
	char *file_list[10] = {"ccci_fsd", NULL};
#if IO_LOGGER_ENABLE
	unsigned long long time1 = 0, timeoffset = 0;
	bool add_trace_e = false;
	char path_c[20] = {0};
	char *path = NULL;
	const char *mount_point = NULL;
#endif

	mount_data = real_mount(file->f_path.mnt);
	if (!memcmp(mount_data->mnt_mountpoint->d_name.name, "data", 5)) {
		/* printk(KERN_ERR "write data detect %s", file->f_path.dentry->d_name.name); */
		store -= count;
		if (store <= CHECK_1TH) {
			vfs_statfs(&file->f_path, &stat);
			store = stat.f_bfree * stat.f_bsize;
			if (store <= CHECK_2TH) {
				store -= count;
				for (; file_list[num] != NULL; num++) {
					if (!strcmp(tsk->comm, file_list[num]))
						break;
				}
				if (file_list[num] == NULL) {
					return -ENOSPC;
				}
			}
		}
	}

#if IO_LOGGER_ENABLE
	if (unlikely(en_IOLogger())) {
		mount_point = mount_data->mnt_mountpoint->d_name.name;
		if (mount_point) {
			if ((!memcmp(mount_point, "data", 4)) || (!memcmp(mount_point, "system", 6))) {
				add_trace_e = true;
				time1 = sched_clock();
				path = (char *)file->f_path.dentry->d_name.name;
				if (strlen(path) >= 16) {
					memcpy(path_c, path, 16);
					path = (char *)path_c;
				}
				AddIOTrace(IO_LOGGER_MSG_VFS_INTFS, vfs_write, path, count);
			}
		}
	}
#endif

#ifdef MTK_IO_PERFORMANCE_DEBUG
	if (g_mtk_mmc_clear == 0) {
		/* memset(g_req_write_buf, 0, 8*4000*30); */
		/* memset(g_mmcqd_buf, 0, 8*400*300); */
		g_dbg_write_count = 0;
		g_mtk_mmc_clear = 1;
	}
	if (('l' == *(current->comm)) && ('m' == *(current->comm + 1)) &&
	    ('d' == *(current->comm + 2)) && ('d' == *(current->comm + 3))) {
		g_dbg_write_count++;
		g_req_write_count[g_dbg_write_count] = count;
		g_req_write_buf[g_dbg_write_count][0] = sched_clock();
	}
#endif

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else
			ret = do_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
	}

#ifdef MTK_IO_PERFORMANCE_DEBUG
	if (('l' == *(current->comm)) && ('m' == *(current->comm + 1)) &&
	    ('d' == *(current->comm + 2)) && ('d' == *(current->comm + 3))) {
		g_req_write_buf[g_dbg_write_count][14] = sched_clock();
	}
#endif

#if IO_LOGGER_ENABLE
	if (unlikely(en_IOLogger()) && add_trace_e) {
		timeoffset = sched_clock() - time1;
		add_trace_e = false;
		if (BEYOND_TRACE_LOG_TIME(timeoffset)) {
			AddIOTrace(IO_LOGGER_MSG_VFS_INTFS_END, vfs_write, path, ret, timeoffset);
			if (BEYOND_DUMP_LOG_TIME(timeoffset))
				DumpIOTrace(timeoffset);
		}
	}
#endif

	return ret;
}
static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count, loff_t max)
{
	struct file *in_file, *out_file;
	struct inode *in_inode, *out_inode;
	loff_t pos;
	ssize_t retval;
	int fput_needed_in, fput_needed_out;

	/*
	 * Get input file, and verify that it is ok..
	 */
	retval = -EBADF;
	in_file = fget_light(in_fd, &fput_needed_in);
	if (!in_file)
		goto out;
	if (!(in_file->f_mode & FMODE_READ))
		goto fput_in;
	retval = -EINVAL;
	in_inode = in_file->f_path.dentry->d_inode;
	if (!in_inode)
		goto fput_in;
	if (!in_file->f_op || !in_file->f_op->sendfile)
		goto fput_in;
	retval = -ESPIPE;
	if (!ppos)
		ppos = &in_file->f_pos;
	else if (!(in_file->f_mode & FMODE_PREAD))
		goto fput_in;
	retval = rw_verify_area(READ, in_file, ppos, count);
	if (retval < 0)
		goto fput_in;
	count = retval;

	retval = security_file_permission(in_file, MAY_READ);
	if (retval)
		goto fput_in;

	/*
	 * Get output file, and verify that it is ok..
	 */
	retval = -EBADF;
	out_file = fget_light(out_fd, &fput_needed_out);
	if (!out_file)
		goto fput_in_sec;
	if (!(out_file->f_mode & FMODE_WRITE))
		goto fput_out;
	retval = -EINVAL;
	if (!out_file->f_op || !out_file->f_op->sendpage)
		goto fput_out;
	out_inode = out_file->f_path.dentry->d_inode;
	retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
	if (retval < 0)
		goto fput_out;
	count = retval;

	retval = security_file_permission(out_file, MAY_WRITE);
	if (retval)
		goto fput_out;

	if (!max)
		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);

	pos = *ppos;
	retval = -EINVAL;
	if (unlikely(pos < 0))
		goto fput_out_sec;
	if (unlikely(pos + count > max)) {
		retval = -EOVERFLOW;
		if (pos >= max)
			goto fput_out;
		count = max - pos;
	}

	retval = in_file->f_op->sendfile(in_file, ppos, count, file_send_actor, out_file);

	if (retval > 0) {
		add_rchar(current, retval);
		add_wchar(current, retval);
	}

	inc_syscr(current);
	inc_syscw(current);
	if (*ppos > max)
		retval = -EOVERFLOW;

fput_out_sec:
	security_file_rw_release(out_file);
fput_out:
	fput_light(out_file, fput_needed_out);
fput_in_sec:
	security_file_rw_release(in_file);
fput_in:
	fput_light(in_file, fput_needed_in);
out:
	return retval;
}
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;

	ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
	BUG_ON(ret == -EIOCBQUEUED);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(do_sync_write);

ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	iov_iter_init(&iter, WRITE, &iov, 1, len);

	ret = filp->f_op->write_iter(&kiocb, &iter);
	BUG_ON(ret == -EIOCBQUEUED);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(new_sync_write);

ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
{
	mm_segment_t old_fs;
	const char __user *p;
	ssize_t ret;

	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;

	old_fs = get_fs();
	set_fs(get_ds());
	p = (__force const char __user *)buf;
	if (count > MAX_RW_COUNT)
		count = MAX_RW_COUNT;
	if (file->f_op->write)
		ret = file->f_op->write(file, p, count, pos);
	else if (file->f_op->aio_write)
		ret = do_sync_write(file, p, count, pos);
	else
		ret = new_sync_write(file, p, count, pos);
	set_fs(old_fs);
	if (ret > 0) {
		fsnotify_modify(file);
		add_wchar(current, ret);
	}
	inc_syscw(current);
	return ret;
}
EXPORT_SYMBOL(__kernel_write);

ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!(file->f_mode & FMODE_CAN_WRITE))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;
		file_start_write(file);
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else if (file->f_op->aio_write)
			ret = do_sync_write(file, buf, count, pos);
		else
			ret = new_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
		file_end_write(file);
	}

	return ret;
}
static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count, loff_t max)
{
	struct file *in_file, *out_file;
	struct inode *in_inode, *out_inode;
	loff_t pos;
	ssize_t retval;
	int fput_needed_in, fput_needed_out, fl;

	retval = -EBADF;
	in_file = fget_light(in_fd, &fput_needed_in);
	if (!in_file)
		goto out;
	if (!(in_file->f_mode & FMODE_READ))
		goto fput_in;
	retval = -ESPIPE;
	if (!ppos)
		ppos = &in_file->f_pos;
	else if (!(in_file->f_mode & FMODE_PREAD))
		goto fput_in;
	retval = rw_verify_area(READ, in_file, ppos, count);
	if (retval < 0)
		goto fput_in;
	count = retval;

	retval = -EBADF;
	out_file = fget_light(out_fd, &fput_needed_out);
	if (!out_file)
		goto fput_in;
	if (!(out_file->f_mode & FMODE_WRITE))
		goto fput_out;
	retval = -EINVAL;
	in_inode = in_file->f_path.dentry->d_inode;
	out_inode = out_file->f_path.dentry->d_inode;
	retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
	if (retval < 0)
		goto fput_out;
	count = retval;

	if (!max)
		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);

	pos = *ppos;
	if (unlikely(pos + count > max)) {
		retval = -EOVERFLOW;
		if (pos >= max)
			goto fput_out;
		count = max - pos;
	}

	fl = 0;
#if 0
	if (in_file->f_flags & O_NONBLOCK)
		fl = SPLICE_F_NONBLOCK;
#endif
	retval = do_splice_direct(in_file, ppos, out_file, count, fl);

	if (retval > 0) {
		add_rchar(current, retval);
		add_wchar(current, retval);
	}

	inc_syscr(current);
	inc_syscw(current);
	if (*ppos > max)
		retval = -EOVERFLOW;

fput_out:
	fput_light(out_file, fput_needed_out);
fput_in:
	fput_light(in_file, fput_needed_in);
out:
	return retval;
}
static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count, loff_t max)
{
	struct fd in, out;
	struct inode *in_inode, *out_inode;
	loff_t pos;
	ssize_t retval;
	int fl;

	/*
	 * Get input file, and verify that it is ok..
	 */
	retval = -EBADF;
	in = fdget(in_fd);
	if (!in.file)
		goto out;
	if (!(in.file->f_mode & FMODE_READ))
		goto fput_in;
	retval = -ESPIPE;
	if (!ppos)
		ppos = &in.file->f_pos;
	else if (!(in.file->f_mode & FMODE_PREAD))
		goto fput_in;
	retval = rw_verify_area(READ, in.file, ppos, count);
	if (retval < 0)
		goto fput_in;
	count = retval;

	/*
	 * Get output file, and verify that it is ok..
	 */
	retval = -EBADF;
	out = fdget(out_fd);
	if (!out.file)
		goto fput_in;
	if (!(out.file->f_mode & FMODE_WRITE))
		goto fput_out;
	retval = -EINVAL;
	in_inode = file_inode(in.file);
	out_inode = file_inode(out.file);
	retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count);
	if (retval < 0)
		goto fput_out;
	count = retval;

	if (!max)
		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);

	pos = *ppos;
	if (unlikely(pos + count > max)) {
		retval = -EOVERFLOW;
		if (pos >= max)
			goto fput_out;
		count = max - pos;
	}

	fl = 0;
#if 0
	/*
	 * We need to debate whether we can enable this or not. The
	 * man page documents EAGAIN return for the output at least,
	 * and the application is arguably buggy if it doesn't expect
	 * EAGAIN on a non-blocking file descriptor.
	 */
	if (in.file->f_flags & O_NONBLOCK)
		fl = SPLICE_F_NONBLOCK;
#endif
	retval = do_splice_direct(in.file, ppos, out.file, count, fl);

	if (retval > 0) {
		add_rchar(current, retval);
		add_wchar(current, retval);
		fsnotify_access(in.file);
		fsnotify_modify(out.file);
	}

	inc_syscr(current);
	inc_syscw(current);
	if (*ppos > max)
		retval = -EOVERFLOW;

fput_out:
	fdput(out);
fput_in:
	fdput(in);
out:
	return retval;
}
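The #if 0 comment above points out that sendfile(2) callers should already tolerate EAGAIN and short transfers on non-blocking descriptors. As a hedged user-space illustration of that expectation (not part of the kernel source), the following sketch shows a retry loop around sendfile(2); the helper name sendfile_all and its parameters are hypothetical, and a production caller would typically poll(2) instead of retrying immediately on EAGAIN.

#include <errno.h>
#include <sys/sendfile.h>
#include <sys/types.h>

/*
 * Hypothetical helper: transfer "count" bytes from in_fd to out_fd with
 * sendfile(2), looping on short transfers and on EAGAIN/EINTR.
 * Returns 0 on success, -1 on error (errno set by sendfile).
 */
static int sendfile_all(int out_fd, int in_fd, off_t offset, size_t count)
{
	while (count > 0) {
		ssize_t sent = sendfile(out_fd, in_fd, &offset, count);

		if (sent < 0) {
			if (errno == EAGAIN || errno == EINTR)
				continue;	/* would block or interrupted: retry (poll in real code) */
			return -1;
		}
		if (sent == 0)
			break;			/* nothing left to read from in_fd */
		count -= (size_t)sent;		/* short transfer: loop for the rest */
	}
	return 0;
}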
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
EXPORT_SYMBOL(do_sync_write);

ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;
	struct task_struct *tsk = current;
	struct kstatfs stat;
	static long long store = 0;
	unsigned char num = 0;
	struct mount *mount_data;
	char *file_list[10] = {"ccci_fsd", NULL};

	mount_data = real_mount(file->f_path.mnt);
	if (!memcmp(mount_data->mnt_mountpoint->d_name.name, "data", 5)) {
		/* printk(KERN_ERR "write data detect %s", file->f_path.dentry->d_name.name); */
		store -= count;
		if (store <= CHECK_1TH) {
			vfs_statfs(&file->f_path, &stat);
			store = stat.f_bfree * stat.f_bsize;
			if (store <= CHECK_2TH) {
				store -= count;
				for (; file_list[num] != NULL; num++) {
					if (!strcmp(tsk->comm, file_list[num]))
						break;
				}
				if (file_list[num] == NULL) {
					store += count;
					return -ENOSPC;
				}
			}
		}
	}

#ifdef LIMIT_SDCARD_SIZE
	if (!memcmp(file->f_path.mnt->mnt_sb->s_type->name, "fuse", 5)) {
		store -= count;
		if (store <= (data_free_size_th + CHECK_1TH * 2)) {
			vfs_statfs(&file->f_path, &stat);
			store = stat.f_bfree * stat.f_bsize + data_free_size_th;
			/* printk("initialize data free size when acess sdcard0 ,%llx\n", store); */
			store -= count;
			if (store <= data_free_size_th) {
				/* printk("wite sdcard0 over flow, %llx\n", store); */
				store += count;
				return -ENOSPC;
			}
		}
		store += count;
	}
#endif

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else
			ret = do_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file);
			add_wchar(current, ret);
		}
		inc_syscw(current);
	}

	return ret;
}