/**
 * Look up and return the inode for a path name.
 * If nameiparent is true, return the inode for the parent and copy the final
 * path element into name, which must have room for DIRSIZ bytes.
 * Returns 0 in the case of error.
 */
static struct inode*
namex(char *path, bool nameiparent, char *name)
{
    struct inode *ip;
    struct inode *next;

    // If path is a full path, get the pointer to the root inode. Otherwise get
    // the inode corresponding to the current working directory.
    if (*path == '/') {
        ip = inode_get(ROOTDEV, ROOTINO);
    } else {
        ip = inode_dup((struct inode *) tcb_get_cwd(get_curid()));
    }

    while ((path = skipelem(path, name)) != 0) {
        inode_lock(ip);
        if (ip->type != T_DIR) {
            // Not a directory: release the lock and the reference before failing.
            inode_unlockput(ip);
            return 0;
        }
        if (nameiparent && *path == 0) {
            // Stop one level early, returning the (unlocked) parent inode.
            inode_unlock(ip);
            return ip;
        }
        if ((next = dir_lookup(ip, name, 0)) == 0) {
            // Path element not found in this directory.
            inode_unlockput(ip);
            return 0;
        }
        inode_unlockput(ip);
        ip = next;
    }

    if (nameiparent) {
        inode_put(ip);
        return 0;
    }
    return ip;
}
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
    struct file *file = iocb->ki_filp;
    struct inode *inode = file_inode(file);
    ssize_t ret;

    if (f2fs_encrypted_inode(inode) &&
        !fscrypt_has_encryption_key(inode) &&
        fscrypt_get_encryption_info(inode))
        return -EACCES;

    inode_lock(inode);
    ret = generic_write_checks(iocb, from);
    if (ret > 0) {
        ret = f2fs_preallocate_blocks(iocb, from);
        if (!ret)
            ret = __generic_file_write_iter(iocb, from);
    }
    inode_unlock(inode);

    if (ret > 0) {
        ssize_t err;

        err = generic_write_sync(file, iocb->ki_pos - ret, ret);
        if (err < 0)
            ret = err;
    }
    return ret;
}
static ssize_t orangefs_file_write_iter(struct kiocb *iocb,
                                        struct iov_iter *iter)
{
    struct file *file = iocb->ki_filp;
    loff_t pos;
    ssize_t rc;

    BUG_ON(iocb->private);

    gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n");

    inode_lock(file->f_mapping->host);

    /* Make sure generic_write_checks sees an up to date inode size. */
    if (file->f_flags & O_APPEND) {
        rc = orangefs_inode_getattr(file->f_mapping->host, 0, 1, STATX_SIZE);
        if (rc == -ESTALE)
            rc = -EIO;
        if (rc) {
            gossip_err("%s: orangefs_inode_getattr failed, rc:%zd:.\n",
                       __func__, rc);
            goto out;
        }
    }

    rc = generic_write_checks(iocb, iter);
    if (rc <= 0) {
        gossip_err("%s: generic_write_checks failed, rc:%zd:.\n",
                   __func__, rc);
        goto out;
    }

    /*
     * If we are appending, generic_write_checks would have updated
     * pos to the end of the file, so we will wait till now to set
     * pos...
     */
    pos = iocb->ki_pos;

    rc = do_readv_writev(ORANGEFS_IO_WRITE, file, &pos, iter);
    if (rc < 0) {
        gossip_err("%s: do_readv_writev failed, rc:%zd:.\n",
                   __func__, rc);
        goto out;
    }

    iocb->ki_pos = pos;
    orangefs_stats.writes++;

out:
    inode_unlock(file->f_mapping->host);
    return rc;
}
int ovl_setattr(struct dentry *dentry, struct iattr *attr)
{
    int err;
    struct dentry *upperdentry;

    /*
     * Check for permissions before trying to copy-up. This is redundant
     * since it will be rechecked later by ->setattr() on upper dentry. But
     * without this, copy-up can be triggered by just about anybody.
     *
     * We don't initialize inode->size, which just means that
     * inode_newsize_ok() will always check against MAX_LFS_FILESIZE and not
     * check for a swapfile (which this won't be anyway).
     */
    err = inode_change_ok(dentry->d_inode, attr);
    if (err)
        return err;

    err = ovl_want_write(dentry);
    if (err)
        goto out;

    if (attr->ia_valid & ATTR_SIZE) {
        struct inode *realinode = d_inode(ovl_dentry_real(dentry));

        err = -ETXTBSY;
        if (atomic_read(&realinode->i_writecount) < 0)
            goto out_drop_write;
    }

    err = ovl_copy_up(dentry);
    if (!err) {
        struct inode *winode = NULL;

        upperdentry = ovl_dentry_upper(dentry);

        if (attr->ia_valid & ATTR_SIZE) {
            winode = d_inode(upperdentry);
            err = get_write_access(winode);
            if (err)
                goto out_drop_write;
        }

        inode_lock(upperdentry->d_inode);
        err = notify_change(upperdentry, attr, NULL);
        if (!err)
            ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
        inode_unlock(upperdentry->d_inode);

        if (winode)
            put_write_access(winode);
    }
out_drop_write:
    ovl_drop_write(dentry);
out:
    return err;
}
/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
    struct xfs_inode *ip,
    int type)
{
    if (type & XFS_IOLOCK_EXCL)
        inode_lock(VFS_I(ip));
    xfs_ilock(ip, type);
}
/* Removes any entry for NAME in DIR.
   Returns true if successful, false on failure,
   which occurs only if there is no file with the given NAME. */
bool
dir_remove (struct dir *dir, const char *name)
{
  struct dir_entry e;
  struct inode *inode = NULL;
  bool success = false;
  off_t ofs;

  ASSERT (dir != NULL);
  ASSERT (name != NULL);

  /* Lock the directory for the rest of the lookup operation. */
  inode_lock (dir->inode);
  name = getfilename (name);

  /* Find directory entry. */
  if (!lookup (dir, name, &e, &ofs))
    goto done;

  /* Open inode. */
  inode = inode_open (e.inode_sector);
  if (inode == NULL)
    goto done;

  /* If it is a directory that is still open or non-empty, don't allow
     deletion. */
  if (inode_type (inode) == FILE_DIR)
    {
      /* Is the directory in use? */
      if (inode_opencnt (inode) > 1)
        goto done;

      char *temp = (char *) malloc (sizeof (char) * (NAME_MAX + 1));
      struct dir *dirtemp = dir_open (inode);

      /* Is the directory empty? */
      if (dir_readdir (dirtemp, temp))
        {
          free (temp);
          dir_close (dirtemp);
          goto done;
        }
      free (temp);
      dir_close (dirtemp);
    }

  /* Erase directory entry. */
  e.in_use = false;
  if (inode_write_at (dir->inode, &e, sizeof e, ofs) != sizeof e)
    goto done;

  /* Remove inode. */
  inode_remove (inode);
  success = true;

 done:
  /* Unlock the directory. */
  inode_unlock (dir->inode);
  inode_close (inode);
  return success;
}
static int fat_ioctl_get_attributes(struct inode *inode, u32 __user *user_attr)
{
    u32 attr;

    inode_lock(inode);
    attr = fat_make_attrs(inode);
    inode_unlock(inode);

    return put_user(attr, user_attr);
}
int generic_block_fiemap(struct inode *inode,
                         struct fiemap_extent_info *fieinfo, u64 start,
                         u64 len, get_block_t *get_block)
{
    int ret;

    inode_lock(inode);
    ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block);
    inode_unlock(inode);

    return ret;
}
static int utimes_common(const struct path *path, struct timespec64 *times)
{
    int error;
    struct iattr newattrs;
    struct inode *inode = path->dentry->d_inode;
    struct inode *delegated_inode = NULL;

    error = mnt_want_write(path->mnt);
    if (error)
        goto out;

    if (times && times[0].tv_nsec == UTIME_NOW &&
        times[1].tv_nsec == UTIME_NOW)
        times = NULL;

    newattrs.ia_valid = ATTR_CTIME | ATTR_MTIME | ATTR_ATIME;
    if (times) {
        if (times[0].tv_nsec == UTIME_OMIT)
            newattrs.ia_valid &= ~ATTR_ATIME;
        else if (times[0].tv_nsec != UTIME_NOW) {
            newattrs.ia_atime.tv_sec = times[0].tv_sec;
            newattrs.ia_atime.tv_nsec = times[0].tv_nsec;
            newattrs.ia_valid |= ATTR_ATIME_SET;
        }

        if (times[1].tv_nsec == UTIME_OMIT)
            newattrs.ia_valid &= ~ATTR_MTIME;
        else if (times[1].tv_nsec != UTIME_NOW) {
            newattrs.ia_mtime.tv_sec = times[1].tv_sec;
            newattrs.ia_mtime.tv_nsec = times[1].tv_nsec;
            newattrs.ia_valid |= ATTR_MTIME_SET;
        }
        /*
         * Tell setattr_prepare() that this is an explicit time
         * update, even if neither ATTR_ATIME_SET nor ATTR_MTIME_SET
         * were used.
         */
        newattrs.ia_valid |= ATTR_TIMES_SET;
    } else {
        newattrs.ia_valid |= ATTR_TOUCH;
    }
retry_deleg:
    inode_lock(inode);
    error = notify_change(path->dentry, &newattrs, &delegated_inode);
    inode_unlock(inode);
    if (delegated_inode) {
        error = break_deleg_wait(&delegated_inode);
        if (!error)
            goto retry_deleg;
    }

    mnt_drop_write(path->mnt);
out:
    return error;
}
/**
 * Get metadata about file f.
 */
int file_stat(struct file *f, struct file_stat *st)
{
    if (f->type == FD_INODE) {
        inode_lock(f->ip);
        inode_stat(f->ip, st);
        inode_unlock(f->ip);
        return 0;
    }
    return -1;
}
static loff_t kernfs_dir_fop_llseek(struct file *file, loff_t offset,
                                    int whence)
{
    struct inode *inode = file_inode(file);
    loff_t ret;

    inode_lock(inode);
    ret = generic_file_llseek(file, offset, whence);
    inode_unlock(inode);

    return ret;
}
loff_t default_llseek(struct file *file, loff_t offset, int whence)
{
    struct inode *inode = file_inode(file);
    loff_t retval;

    inode_lock(inode);
    switch (whence) {
    case SEEK_END:
        offset += i_size_read(inode);
        break;
    case SEEK_CUR:
        if (offset == 0) {
            retval = file->f_pos;
            goto out;
        }
        offset += file->f_pos;
        break;
    case SEEK_DATA:
        /*
         * In the generic case the entire file is data, so as
         * long as offset isn't at the end of the file then the
         * offset is data.
         */
        if (offset >= inode->i_size) {
            retval = -ENXIO;
            goto out;
        }
        break;
    case SEEK_HOLE:
        /*
         * There is a virtual hole at the end of the file, so
         * as long as offset isn't i_size or larger, return
         * i_size.
         */
        if (offset >= inode->i_size) {
            retval = -ENXIO;
            goto out;
        }
        offset = inode->i_size;
        break;
    }
    retval = -EINVAL;
    if (offset >= 0 || unsigned_offsets(file)) {
        if (offset != file->f_pos) {
            file->f_pos = offset;
            file->f_version = 0;
        }
        retval = offset;
    }
out:
    inode_unlock(inode);
    return retval;
}
static int mknod_ptmx(struct super_block *sb)
{
    int mode;
    int rc = -ENOMEM;
    struct dentry *dentry;
    struct inode *inode;
    struct dentry *root = sb->s_root;
    struct pts_fs_info *fsi = DEVPTS_SB(sb);
    struct pts_mount_opts *opts = &fsi->mount_opts;
    kuid_t ptmx_uid = current_fsuid();
    kgid_t ptmx_gid = current_fsgid();

    inode_lock(d_inode(root));

    /* If we have already created the ptmx node, return. */
    if (fsi->ptmx_dentry) {
        rc = 0;
        goto out;
    }

    dentry = d_alloc_name(root, "ptmx");
    if (!dentry) {
        pr_err("Unable to alloc dentry for ptmx node\n");
        goto out;
    }

    /*
     * Create a new 'ptmx' node in this mount of devpts.
     */
    inode = new_inode(sb);
    if (!inode) {
        pr_err("Unable to alloc inode for ptmx node\n");
        dput(dentry);
        goto out;
    }

    inode->i_ino = 2;
    inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

    mode = S_IFCHR | opts->ptmxmode;
    init_special_inode(inode, mode, MKDEV(TTYAUX_MAJOR, 2));
    inode->i_uid = ptmx_uid;
    inode->i_gid = ptmx_gid;

    d_add(dentry, inode);

    fsi->ptmx_dentry = dentry;
    rc = 0;
out:
    inode_unlock(d_inode(root));
    return rc;
}
/*******************************************************************************
 fs_iget: returns a locked previously allocated inode
*******************************************************************************/
struct inode *fs_iget(dev_nr_t dev_nr, inode_nr_t inode_nr)
{
    struct inode *ip;

    assert(dev_nr, "fs_iget(): dev_nr == 0");

repeat:
    if ((ip = ihash_table_get_inode(dev_nr, inode_nr))) {
        if (inode_is_locked(ip)) {
            ; /* sleep on this inode */
            goto repeat;
        }
        if (!inode_get_count(ip)) /* the inode is on the free list */
            ifree_list_remove_inode(ip);

        inode_lock(ip);
        inode_count_inc(ip);

        /* special processing for mount points ... */
        /* ... */

        return ip;
    }

    /* The inode was not in memory. */
    if (ifree_list_is_empty())
        return NULL;

    ip = ifree_list_get_head();
    ifree_list_remove_inode(ip);

    if (inode_is_valid(ip)) /* the inode is on the hash table */
        ihash_table_remove_inode(ip);

    inode_set_dev_nr(ip, dev_nr);
    inode_set_nr(ip, inode_nr);
    ihash_table_insert_inode(ip);

    inode_lock(ip);
    inode_count_inc(ip);

    fs_iread(ip);

    return ip;
}
/*
 * Reads from a regular file.
 */
PUBLIC ssize_t file_read(struct inode *i, void *buf, size_t n, off_t off)
{
    char *p;             /* Writing pointer.      */
    size_t blkoff;       /* Block offset.         */
    size_t chunk;        /* Data chunk size.      */
    block_t blk;         /* Working block number. */
    struct buffer *bbuf; /* Working block buffer. */

    p = buf;

    inode_lock(i);

    /* Read data. */
    do {
        blk = block_map(i, off, 0);

        /* End of file reached. */
        if (blk == BLOCK_NULL)
            goto out;

        bbuf = bread(i->dev, blk);

        blkoff = off % BLOCK_SIZE;

        /* Calculate read chunk size. */
        chunk = (n < BLOCK_SIZE - blkoff) ? n : BLOCK_SIZE - blkoff;
        if ((off_t)chunk > i->size - off) {
            chunk = i->size - off;
            if (chunk == 0) {
                brelse(bbuf);
                goto out;
            }
        }

        kmemcpy(p, (char *)bbuf->data + blkoff, chunk);

        brelse(bbuf);

        n -= chunk;
        off += chunk;
        p += chunk;
    } while (n > 0);

out:
    inode_touch(i);
    inode_unlock(i);

    return ((ssize_t)(p - (char *)buf));
}
static ssize_t efivarfs_file_write(struct file *file,
                                   const char __user *userbuf, size_t count,
                                   loff_t *ppos)
{
    struct efivar_entry *var = file->private_data;
    void *data;
    u32 attributes;
    struct inode *inode = file->f_mapping->host;
    unsigned long datasize = count - sizeof(attributes);
    ssize_t bytes;
    bool set = false;

    if (count < sizeof(attributes))
        return -EINVAL;

    if (copy_from_user(&attributes, userbuf, sizeof(attributes)))
        return -EFAULT;

    if (attributes & ~(EFI_VARIABLE_MASK))
        return -EINVAL;

    data = memdup_user(userbuf + sizeof(attributes), datasize);
    if (IS_ERR(data))
        return PTR_ERR(data);

    bytes = efivar_entry_set_get_size(var, attributes, &datasize,
                                      data, &set);
    if (!set && bytes) {
        if (bytes == -ENOENT)
            bytes = -EIO;
        goto out;
    }

    if (bytes == -ENOENT) {
        drop_nlink(inode);
        d_delete(file->f_path.dentry);
        dput(file->f_path.dentry);
    } else {
        inode_lock(inode);
        i_size_write(inode, datasize + sizeof(attributes));
        inode_unlock(inode);
    }

    bytes = count;

out:
    kfree(data);
    return bytes;
}
/*
 * Writes to a regular file.
 */
PUBLIC ssize_t file_write(struct inode *i, const void *buf, size_t n, off_t off)
{
    const char *p;       /* Reading pointer.      */
    size_t blkoff;       /* Block offset.         */
    size_t chunk;        /* Data chunk size.      */
    block_t blk;         /* Working block number. */
    struct buffer *bbuf; /* Working block buffer. */

    p = buf;

    inode_lock(i);

    /* Write data. */
    do {
        blk = block_map(i, off, 1);

        /* End of file reached. */
        if (blk == BLOCK_NULL)
            goto out;

        bbuf = bread(i->dev, blk);

        blkoff = off % BLOCK_SIZE;

        /* Calculate write chunk size. */
        chunk = (n < BLOCK_SIZE - blkoff) ? n : BLOCK_SIZE - blkoff;

        /* Copy from the advancing read pointer, not from the start of buf. */
        kmemcpy((char *)bbuf->data + blkoff, p, chunk);

        bbuf->flags |= BUFFER_DIRTY;
        brelse(bbuf);

        n -= chunk;
        off += chunk;
        p += chunk;

        /* Update file size. */
        if (off > i->size) {
            i->size = off;
            i->flags |= INODE_DIRTY;
        }
    } while (n > 0);

out:
    inode_touch(i);
    inode_unlock(i);

    return ((ssize_t)(p - (char *)buf));
}
/* exofs_file_fsync - flush the inode to disk
 *
 * Note, in exofs all metadata is written as part of inode, regardless.
 * The writeout is synchronous.
 */
static int exofs_file_fsync(struct file *filp, loff_t start, loff_t end,
                            int datasync)
{
    struct inode *inode = filp->f_mapping->host;
    int ret;

    ret = file_write_and_wait_range(filp, start, end);
    if (ret)
        return ret;

    inode_lock(inode);
    ret = sync_inode_metadata(filp->f_mapping->host, 1);
    inode_unlock(inode);

    return ret;
}
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
    struct inode *inode = file_inode(filp);
    struct f2fs_inode_info *fi = F2FS_I(inode);
    unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
    unsigned int oldflags;
    int ret;

    ret = mnt_want_write_file(filp);
    if (ret)
        return ret;

    if (!inode_owner_or_capable(inode)) {
        ret = -EACCES;
        goto out;
    }

    if (get_user(flags, (int __user *)arg)) {
        ret = -EFAULT;
        goto out;
    }

    flags = f2fs_mask_flags(inode->i_mode, flags);

    inode_lock(inode);

    oldflags = fi->i_flags;

    if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
        if (!capable(CAP_LINUX_IMMUTABLE)) {
            inode_unlock(inode);
            ret = -EPERM;
            goto out;
        }
    }

    flags = flags & FS_FL_USER_MODIFIABLE;
    flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
    fi->i_flags = flags;

    inode_unlock(inode);

    f2fs_set_inode_flags(inode);
    inode->i_ctime = CURRENT_TIME;
    mark_inode_dirty(inode);
out:
    mnt_drop_write_file(filp);
    return ret;
}
/* Adds a file named NAME to DIR, which must not already contain a
   file by that name.  The file's inode is in sector INODE_SECTOR.
   Returns true if successful, false on failure.
   Fails if NAME is invalid (i.e. too long) or a disk or memory
   error occurs. */
bool
dir_add (struct dir *dir, const char *name, block_sector_t inode_sector)
{
  struct dir_entry e;
  off_t ofs;
  bool success = false;

  ASSERT (dir != NULL);
  ASSERT (name != NULL);

  inode_lock (dir_get_inode (dir));

  /* Check NAME for validity. */
  if (*name == '\0' || strlen (name) > NAME_MAX)
    {
      inode_unlock (dir_get_inode (dir));
      return false;
    }

  /* Check that NAME is not in use. */
  if (lookup (dir, name, NULL, NULL))
    goto done;

  if (!inode_add_parent (inode_get_inumber (dir_get_inode (dir)),
                         inode_sector))
    goto done;

  /* Set OFS to offset of free slot.
     If there are no free slots, then it will be set to the
     current end-of-file.

     inode_read_at() will only return a short read at end of file.
     Otherwise, we'd need to verify that we didn't get a short read
     due to something intermittent such as low memory. */
  for (ofs = 0; inode_read_at (dir->inode, &e, sizeof e, ofs) == sizeof e;
       ofs += sizeof e)
    if (!e.in_use)
      break;

  /* Write slot. */
  e.in_use = true;
  strlcpy (e.name, name, sizeof e.name);
  e.inode_sector = inode_sector;
  success = inode_write_at (dir->inode, &e, sizeof e, ofs) == sizeof e;

 done:
  inode_unlock (dir_get_inode (dir));
  return success;
}
static long f2fs_fallocate(struct file *file, int mode,
                           loff_t offset, loff_t len)
{
    struct inode *inode = file_inode(file);
    long ret = 0;

    /* f2fs only supports ->fallocate for regular files */
    if (!S_ISREG(inode->i_mode))
        return -EINVAL;

    if (f2fs_encrypted_inode(inode) &&
        (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
        return -EOPNOTSUPP;

    if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
                 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
                 FALLOC_FL_INSERT_RANGE))
        return -EOPNOTSUPP;

    inode_lock(inode);

    if (mode & FALLOC_FL_PUNCH_HOLE) {
        if (offset >= inode->i_size)
            goto out;

        ret = punch_hole(inode, offset, len);
    } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
        ret = f2fs_collapse_range(inode, offset, len);
    } else if (mode & FALLOC_FL_ZERO_RANGE) {
        ret = f2fs_zero_range(inode, offset, len, mode);
    } else if (mode & FALLOC_FL_INSERT_RANGE) {
        ret = f2fs_insert_range(inode, offset, len);
    } else {
        ret = expand_inode_data(inode, offset, len, mode);
    }

    if (!ret) {
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        mark_inode_dirty(inode);
        f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
    }

out:
    inode_unlock(inode);

    trace_f2fs_fallocate(inode, mode, offset, len, ret);
    return ret;
}
static int sel_open_policy(struct inode *inode, struct file *filp)
{
    struct policy_load_memory *plm = NULL;
    int rc;

    BUG_ON(filp->private_data);

    mutex_lock(&sel_mutex);

    rc = avc_has_perm(current_sid(), SECINITSID_SECURITY,
                      SECCLASS_SECURITY, SECURITY__READ_POLICY, NULL);
    if (rc)
        goto err;

    rc = -EBUSY;
    if (policy_opened)
        goto err;

    rc = -ENOMEM;
    plm = kzalloc(sizeof(*plm), GFP_KERNEL);
    if (!plm)
        goto err;

    if (i_size_read(inode) != security_policydb_len()) {
        inode_lock(inode);
        i_size_write(inode, security_policydb_len());
        inode_unlock(inode);
    }

    rc = security_read_policy(&plm->data, &plm->len);
    if (rc)
        goto err;

    policy_opened = 1;

    filp->private_data = plm;

    mutex_unlock(&sel_mutex);

    return 0;
err:
    mutex_unlock(&sel_mutex);

    if (plm)
        vfree(plm->data);
    kfree(plm);
    return rc;
}
static void hypfs_remove(struct dentry *dentry)
{
    struct dentry *parent;

    parent = dentry->d_parent;
    inode_lock(d_inode(parent));
    if (simple_positive(dentry)) {
        if (d_is_dir(dentry))
            simple_rmdir(d_inode(parent), dentry);
        else
            simple_unlink(d_inode(parent), dentry);
    }
    d_delete(dentry);
    dput(dentry);
    inode_unlock(d_inode(parent));
}
/**
 * Read from file f.
 */
int file_read(struct file *f, char *addr, int n)
{
    int r;

    if (f->readable == 0)
        return -1;

    if (f->type == FD_INODE) {
        inode_lock(f->ip);
        if ((r = inode_read(f->ip, addr, f->off, n)) > 0)
            f->off += r;
        inode_unlock(f->ip);
        return r;
    }

    KERN_PANIC("file_read");
    return -1; /* not reached; avoids falling off the end of a non-void function */
}
static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
                                       struct inode *inode_alloc, u64 blkno,
                                       struct ocfs2_info_freeinode *fi,
                                       u32 slot)
{
    int status = 0, unlock = 0;
    struct buffer_head *bh = NULL;
    struct ocfs2_dinode *dinode_alloc = NULL;

    if (inode_alloc)
        inode_lock(inode_alloc);

    if (o2info_coherent(&fi->ifi_req)) {
        status = ocfs2_inode_lock(inode_alloc, &bh, 0);
        if (status < 0) {
            mlog_errno(status);
            goto bail;
        }
        unlock = 1;
    } else {
        status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
        if (status < 0) {
            mlog_errno(status);
            goto bail;
        }
    }

    dinode_alloc = (struct ocfs2_dinode *)bh->b_data;

    fi->ifi_stat[slot].lfi_total =
        le32_to_cpu(dinode_alloc->id1.bitmap1.i_total);
    fi->ifi_stat[slot].lfi_free =
        le32_to_cpu(dinode_alloc->id1.bitmap1.i_total) -
        le32_to_cpu(dinode_alloc->id1.bitmap1.i_used);

bail:
    if (unlock)
        ocfs2_inode_unlock(inode_alloc, 0);

    if (inode_alloc)
        inode_unlock(inode_alloc);

    brelse(bh);

    return status;
}
int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
    struct inode *inode = filp->f_mapping->host;
    struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
    int ret;

    ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
    if (ret)
        return ret;

    inode_lock(inode);
    /* Trigger GC to flush any pending writes for this inode */
    jffs2_flush_wbuf_gc(c, inode->i_ino);
    inode_unlock(inode);

    return 0;
}
static int udf_release_file(struct inode *inode, struct file *filp)
{
    if (filp->f_mode & FMODE_WRITE &&
        atomic_read(&inode->i_writecount) == 1) {
        /*
         * Grab i_mutex to avoid races with writes changing i_size
         * while we are running.
         */
        inode_lock(inode);
        down_write(&UDF_I(inode)->i_data_sem);
        udf_discard_prealloc(inode);
        udf_truncate_tail_extent(inode);
        up_write(&UDF_I(inode)->i_data_sem);
        inode_unlock(inode);
    }
    return 0;
}
static int create_file(const char *name, umode_t mode,
                       struct dentry *parent, struct dentry **dentry,
                       const struct file_operations *fops, void *data)
{
    int error;

    inode_lock(d_inode(parent));
    *dentry = lookup_one_len(name, parent, strlen(name));
    if (!IS_ERR(*dentry))
        error = ipathfs_mknod(d_inode(parent), *dentry,
                              mode, fops, data);
    else
        error = PTR_ERR(*dentry);
    inode_unlock(d_inode(parent));

    return error;
}
static long fcntl_rw_hint(struct file *file, unsigned int cmd,
                          unsigned long arg)
{
    struct inode *inode = file_inode(file);
    u64 __user *argp = (u64 __user *)arg;
    enum rw_hint hint;
    u64 h;

    switch (cmd) {
    case F_GET_FILE_RW_HINT:
        h = file_write_hint(file);
        if (copy_to_user(argp, &h, sizeof(*argp)))
            return -EFAULT;
        return 0;
    case F_SET_FILE_RW_HINT:
        if (copy_from_user(&h, argp, sizeof(h)))
            return -EFAULT;
        hint = (enum rw_hint) h;
        if (!rw_hint_valid(hint))
            return -EINVAL;

        spin_lock(&file->f_lock);
        file->f_write_hint = hint;
        spin_unlock(&file->f_lock);
        return 0;
    case F_GET_RW_HINT:
        h = inode->i_write_hint;
        if (copy_to_user(argp, &h, sizeof(*argp)))
            return -EFAULT;
        return 0;
    case F_SET_RW_HINT:
        if (copy_from_user(&h, argp, sizeof(h)))
            return -EFAULT;
        hint = (enum rw_hint) h;
        if (!rw_hint_valid(hint))
            return -EINVAL;

        inode_lock(inode);
        inode->i_write_hint = hint;
        inode_unlock(inode);
        return 0;
    default:
        return -EINVAL;
    }
}
static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
    ssize_t retval;
    struct file *file = iocb->ki_filp;
    struct inode *inode = file_inode(file);
    struct udf_inode_info *iinfo = UDF_I(inode);
    int err;

    inode_lock(inode);

    retval = generic_write_checks(iocb, from);
    if (retval <= 0)
        goto out;

    down_write(&iinfo->i_data_sem);
    if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
        loff_t end = iocb->ki_pos + iov_iter_count(from);

        if (inode->i_sb->s_blocksize <
            (udf_file_entry_alloc_offset(inode) + end)) {
            err = udf_expand_file_adinicb(inode);
            if (err) {
                inode_unlock(inode);
                udf_debug("udf_expand_adinicb: err=%d\n", err);
                return err;
            }
        } else {
            iinfo->i_lenAlloc = max(end, inode->i_size);
            up_write(&iinfo->i_data_sem);
        }
    } else
        up_write(&iinfo->i_data_sem);

    retval = __generic_file_write_iter(iocb, from);
out:
    inode_unlock(inode);

    if (retval > 0) {
        mark_inode_dirty(inode);
        retval = generic_write_sync(iocb, retval);
    }
    return retval;
}