/*
 * Open a file relative to directory fd @dfd and return a new file
 * descriptor, or a negative errno.  Copies the userspace pathname,
 * reserves a descriptor, opens the file, and publishes the struct file
 * in the fd table only once the open has fully succeeded.
 */
long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
{
	char *name = getname(filename);
	int fd = PTR_ERR(name);		/* propagated if getname() failed */

	if (IS_ERR(name))
		return fd;

	fd = get_unused_fd();
	if (fd >= 0) {
		struct file *f = do_filp_open(dfd, name, flags, mode);

		if (IS_ERR(f)) {
			/* Open failed: release the reserved descriptor. */
			put_unused_fd(fd);
			fd = PTR_ERR(f);
		} else {
			fsnotify_open(file_get_dentry(f));
			fd_install(fd, f);
		}
	}
	putname(name);
	return fd;
}
static int file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int error; int block; struct _inode * inode = d_get_inode(file_get_dentry(filp)); int __user *p = (int __user *)arg; switch (cmd) { case FIBMAP: { struct address_space *mapping = filp->f_mapping; int res; /* do we support this mess? */ if (!mapping->a_ops->bmap) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; if ((error = get_user(block, p)) != 0) return error; lock_kernel(); res = mapping->a_ops->bmap(mapping, block); unlock_kernel(); return put_user(res, p); } case FIGETBSZ: return put_user(inode->i_sb->s_blocksize, p); case FIONREAD: return put_user(i_size_read(inode) - tx_cache_get_file_ro(filp)->f_pos, p); } return do_ioctl(filp, cmd, arg); }
long do_fsync(struct file *file, int datasync) { int ret; int err; struct address_space *mapping = file->f_mapping; if (live_transaction()){ /* DEP 5/27/10 - Defer fsync until commit. */ struct deferred_object_operation *def_op; txobj_thread_list_node_t *list_node = workset_has_object(&file->f_mapping->host->xobj); if (!list_node) { tx_cache_get_file_ro(file); tx_cache_get_inode_ro(file->f_mapping->host); list_node = workset_has_object(&file->f_mapping->host->xobj); } def_op = alloc_deferred_object_operation(); INIT_LIST_HEAD(&def_op->list); def_op->type = DEFERRED_TYPE_FSYNC; def_op->u.fsync.datasync = datasync; def_op->u.fsync.file = file; /* DEP: Pin the file until the sync is executed */ tx_atomic_inc_not_zero(&file->f_count); // XXX: Could probably use something finer grained here. WORKSET_LOCK(current->transaction); list_add(&def_op->list, &list_node->deferred_operations); WORKSET_UNLOCK(current->transaction); return 0; } if (!file->f_op || !file->f_op->fsync) { /* Why? We can still call filemap_fdatawrite */ ret = -EINVAL; goto out; } ret = filemap_fdatawrite(mapping); /* * We need to protect against concurrent writers, which could cause * livelocks in fsync_buffers_list(). */ if (!committing_transaction()) mutex_lock(&mapping->host->i_mutex); err = file->f_op->fsync(file, file_get_dentry(file), datasync); if (!ret) ret = err; if (!committing_transaction()) mutex_unlock(&mapping->host->i_mutex); err = filemap_fdatawait(mapping); if (!ret) ret = err; out: return ret; }
/*
 * do_ioctl - dispatch an ioctl to the file's driver.
 *
 * Inside a live transaction, only a small whitelist of known-harmless
 * commands is allowed through; for anything else the per-transaction
 * unsupported_operation_action policy decides whether to abort the
 * transaction, fail with -ENOTXSUPPORT, or warn and proceed (driver
 * ioctls can have side effects the transaction system cannot roll back).
 *
 * Prefers the BKL-free ->unlocked_ioctl entry point; falls back to the
 * legacy ->ioctl under the big kernel lock.  Returns the driver's result,
 * -ENOTTY when no handler exists, or -EINVAL for -ENOIOCTLCMD.
 */
static long do_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int error = -ENOTTY;
	void *f;

	/* Whitelist check */
	if (live_transaction()) {
		switch (cmd) {
		case FIOCLEX:
		case FIONCLEX:
		case FIONBIO:
		case FIOASYNC:
		case FIOQSIZE:
		case TCGETS:
			/* Known-safe inside a transaction: proceed normally. */
			break;
		default:
			if (current->transaction->unsupported_operation_action ==
			    UNSUPPORTED_ABORT) {
				// Don't leap out of the stack
				printk(KERN_ERR "Aborting on unsupported ioctl in tx: %u\n", cmd);
				abort_self(NULL, 0);
			} else if (current->transaction->unsupported_operation_action ==
				   UNSUPPORTED_ERROR_CODE) {
				printk(KERN_ERR "Warning: Stopped execution of unsupported ioctl in tx: %u\n", cmd);
				return -ENOTXSUPPORT;
			} else {
#ifdef CONFIG_TX_KSTM_WARNINGS
				printk(KERN_ERR "Warning: Executing unsupported ioctl in tx: %u\n", cmd);
#endif
			}
		}
	}

	if (!filp->f_op)
		goto out;

	if (filp->f_op->unlocked_ioctl) {
		/* Modern entry point: driver handles its own locking. */
		error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
		if (error == -ENOIOCTLCMD)
			error = -EINVAL;
		goto out;
	} else if ((f = filp->f_op->ioctl)) {
		lock_kernel();
		/*
		 * Re-check under the BKL: the handler sampled above may have
		 * vanished (e.g. module unload race).  Log loudly if so.
		 */
		if (!filp->f_op->ioctl) {
			printk("%s: ioctl %p disappeared\n", __FUNCTION__, f);
			print_symbol("symbol: %s\n", (unsigned long)f);
			dump_stack();
		} else {
			error = filp->f_op->ioctl((file_get_dentry(filp)->d_inode),
						  filp, cmd, arg);
		}
		unlock_kernel();
	}
out:
	return error;
}
/*
 * fchown(2): change the owner and group of an open file descriptor.
 * Returns 0 on success, -EBADF for an invalid fd, or the error from
 * chown_common().
 */
asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group)
{
	int err = -EBADF;
	struct file *file = fget(fd);

	if (file) {
		struct _dentry *dentry = file_get_dentry(file);

		audit_inode(NULL, dentry->d_inode);
		err = chown_common(dentry, user, group);
		fput(file);
	}
	return err;
}
/*
 * fstatfs(2): report filesystem statistics for an open file descriptor.
 * Fills a kernel statfs buffer and copies it out to userspace.
 * Returns 0, -EBADF for an invalid fd, -EFAULT on a bad user buffer, or
 * the error from vfs_statfs_native().
 */
asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf)
{
	struct statfs st;
	int err = -EBADF;
	struct file *file = fget(fd);

	if (file) {
		err = vfs_statfs_native(parent(file_get_dentry(file)), &st);
		if (!err && copy_to_user(buf, &st, sizeof(st)))
			err = -EFAULT;
		fput(file);
	}
	return err;
}
static int configfs_release(struct _inode * inode, struct file * filp) { struct config_item * item = to_item(file_get_dentry(filp)->d_parent); struct configfs_attribute * attr = to_attr(tx_cache_get_file(filp)->f_path.dentry); struct module * owner = attr->ca_owner; struct configfs_buffer * buffer = filp->private_data; if (item) config_item_put(item); /* After this point, attr should not be accessed. */ module_put(owner); if (buffer) { if (buffer->page) free_page((unsigned long)buffer->page); kfree(buffer); } return 0; }
/*
 * fchmod(2): change the mode bits of an open file descriptor.
 * Rejects read-only (-EROFS) and immutable/append-only (-EPERM) inodes,
 * then applies the change via notify_change() under the parent inode
 * mutex.  mode == (mode_t)-1 means "keep the current mode bits".
 */
asmlinkage long sys_fchmod(unsigned int fd, mode_t mode)
{
	struct iattr attrs;
	struct file *filp;
	struct _dentry *dentry;
	struct _inode *inode;
	int err = -EBADF;

	filp = fget(fd);
	if (!filp)
		return err;

	dentry = file_get_dentry(filp);
	inode = d_get_inode(dentry);
	audit_inode(NULL, inode);

	if (IS_RDONLY(inode)) {
		err = -EROFS;
		goto out_put;
	}
	if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
		err = -EPERM;
		goto out_put;
	}

	imutex_lock(parent(inode));
	if (mode == (mode_t)-1)
		mode = inode->i_mode;
	/* Only the permission bits change; file-type bits are preserved. */
	attrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
	attrs.ia_valid = ATTR_MODE | ATTR_CTIME;
	err = notify_change(dentry, &attrs);
	imutex_unlock(parent(inode));

out_put:
	fput(filp);
	return err;
}
/*
 * Read one page worth of directory entries from the FUSE server and feed
 * them to @filldir.  Allocates a single page as the reply buffer, issues a
 * synchronous FUSE_READDIR request, and parses the result.
 * Returns 0 on success or a negative errno.
 */
static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
{
	struct _inode *inode = d_get_inode(file_get_dentry(file));
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct page *pg;
	size_t count;
	int res;

	if (is_bad_inode(inode))
		return -EIO;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	pg = alloc_page(GFP_KERNEL);
	if (!pg) {
		fuse_put_request(fc, req);
		return -ENOMEM;
	}

	req->num_pages = 1;
	req->pages[0] = pg;
	fuse_read_fill(req, file, parent(inode), file->f_pos, PAGE_SIZE,
		       FUSE_READDIR);
	request_send(fc, req);
	count = req->out.args[0].size;
	res = req->out.h.error;
	fuse_put_request(fc, req);
	if (!res)
		res = parse_dirfile(page_address(pg), count, file, dstbuf,
				    filldir);

	__free_page(pg);
	fuse_invalidate_attr(inode);	/* atime changed */
	return res;
}
/*
 * check_perm - validate open() of a configfs attribute file and set up
 * its per-open state.
 *
 * Verifies that the backing config_item and attribute exist, pins the
 * attribute owner's module, and checks that the requested access mode
 * (read and/or write) is permitted both by the inode's permission bits
 * and by the presence of the matching show/store item operation.  On
 * success a zeroed configfs_buffer is allocated and stashed in
 * file->private_data.
 *
 * Returns 0 on success, -EINVAL for a missing item/attr, -ENODEV when
 * the module is gone, -EACCES on a permission mismatch, or -ENOMEM.
 *
 * Error-path note: the Eaccess label deliberately falls through into
 * Enomem so both paths drop the module reference taken above; Done then
 * drops the item reference when exiting with an error.
 */
static int check_perm(struct _inode *inode, struct file *file)
{
	struct config_item *item =
		configfs_get_config_item(file_get_dentry(file)->d_parent);
	struct configfs_attribute *attr;
	struct configfs_buffer *buffer;
	struct configfs_item_operations *ops = NULL;
	struct _file *_file = tx_cache_get_file(file);
	int error = 0;

	attr = to_attr(_file->f_path.dentry);
	if (!item || !attr)
		goto Einval;

	/* Grab the module reference for this attribute if we have one */
	if (!try_module_get(attr->ca_owner)) {
		error = -ENODEV;
		goto Done;
	}

	if (item->ci_type)
		ops = item->ci_type->ct_item_ops;
	else
		goto Eaccess;

	/* File needs write support.
	 * The inode's perms must say it's ok,
	 * and we must have a store method.
	 */
	if (_file->f_mode & FMODE_WRITE) {
		if (!(inode->i_mode & S_IWUGO) || !ops->store_attribute)
			goto Eaccess;
	}

	/* File needs read support.
	 * The inode's perms must say it's ok, and there
	 * must be a show method for it.
	 */
	if (_file->f_mode & FMODE_READ) {
		if (!(inode->i_mode & S_IRUGO) || !ops->show_attribute)
			goto Eaccess;
	}

	/* No error?  Great, allocate a buffer for the file, and store it
	 * in file->private_data for easy access.
	 */
	buffer = kzalloc(sizeof(struct configfs_buffer), GFP_KERNEL);
	if (!buffer) {
		error = -ENOMEM;
		goto Enomem;
	}
	init_MUTEX(&buffer->sem);
	buffer->needs_read_fill = 1;
	buffer->ops = ops;
	file->private_data = buffer;
	goto Done;

Einval:
	error = -EINVAL;
	goto Done;
Eaccess:
	error = -EACCES;
	/* fall through: drop the module reference taken above */
Enomem:
	module_put(attr->ca_owner);
Done:
	if (error && item)
		config_item_put(item);
	return error;
}
/*
 * sys_sync_file_range() permits finely controlled syncing over a segment of
 * a file in the range offset .. (offset+nbytes-1) inclusive.  If nbytes is
 * zero then sys_sync_file_range() will operate from offset out to EOF.
 *
 * The flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
 * before performing the write.
 *
 * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
 * range which are not presently under writeback.
 *
 * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
 * after performing the write.
 *
 * Useful combinations of the flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
 * in the range which were dirty on entry to sys_sync_file_range() are placed
 * under writeout.  This is a start-write-for-data-integrity operation.
 *
 * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
 * are not presently under writeout.  This is an asynchronous flush-to-disk
 * operation.  Not suitable for data integrity operations.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
 * completion of writeout of all pages in the range.  This will be used after
 * an earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to
 * wait for that operation to complete and to return the result.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER:
 * a traditional sync() operation.  This is a write-for-data-integrity
 * operation which will ensure that all pages in the range which were dirty on
 * entry to sys_sync_file_range() are committed to disk.
 *
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
 * I/O errors or ENOSPC conditions and will return those to the caller, after
 * clearing the EIO and ENOSPC flags in the address_space.
 *
 * It should be noted that none of these operations write out the file's
 * metadata.  So unless the application is strictly performing overwrites of
 * already-instantiated disk blocks, there are no guarantees here that the
 * data will be available after a crash.
 */
asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
				    unsigned int flags)
{
	int ret;
	struct file *file;
	loff_t endbyte;			/* inclusive */
	int fput_needed;
	umode_t i_mode;

	ret = -EINVAL;
	if (flags & ~VALID_FLAGS)
		goto out;

	endbyte = offset + nbytes;

	/* Reject negative offsets and signed-overflowing ranges. */
	if ((s64)offset < 0)
		goto out;
	if ((s64)endbyte < 0)
		goto out;
	if (endbyte < offset)
		goto out;

	if (sizeof(pgoff_t) == 4) {
		if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * The range starts outside a 32 bit machine's
			 * pagecache addressing capabilities.  Let it "succeed"
			 */
			ret = 0;
			goto out;
		}
		if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * Out to EOF
			 */
			nbytes = 0;
		}
	}

	if (nbytes == 0)
		endbyte = LLONG_MAX;	/* nbytes == 0 means "to EOF" */
	else
		endbyte--;		/* inclusive */

	ret = -EBADF;
	file = fget_light(fd, &fput_needed);
	if (!file)
		goto out;

	i_mode = d_get_inode(file_get_dentry(file))->i_mode;
	/* Syncing a range makes no sense for pipes, sockets, etc. */
	ret = -ESPIPE;
	if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
			!S_ISLNK(i_mode))
		goto out_put;

	ret = do_sync_mapping_range(file->f_mapping, offset, endbyte, flags);
out_put:
	fput_light(file, fput_needed);
out:
	return ret;
}