STATIC int xfs_vn_setattr( struct dentry *dentry, struct iattr *iattr) { return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0); }
/*
 * Set up the correct mode on the file based on the supplied ACL.  This
 * makes sure that the mode on the file reflects the state of the
 * u::,g::[m::], and o:: entries in the ACL.  Since the mode is where
 * the ACL is going to get the permissions for these entries, we must
 * synchronize the mode whenever we set the ACL on a file.
 *
 * On return *basicperms is 1 if the ACL is fully representable by the
 * standard mode bits (no mask, no named user/group entries), else 0.
 * Returns 0 on success or a positive XFS error code.
 */
STATIC int
xfs_acl_setmode(
	bhv_vnode_t	*vp,
	xfs_acl_t	*acl,
	int		*basicperms)
{
	bhv_vattr_t	va;
	xfs_acl_entry_t	*ap;
	xfs_acl_entry_t	*gap = NULL;
	int		i, error, nomask = 1;

	/* Assume plain mode bits suffice until an extended entry is seen. */
	*basicperms = 1;
	if (acl->acl_cnt == XFS_ACL_NOT_PRESENT)
		return 0;

	/*
	 * Copy the u::, g::, o::, and m:: bits from the ACL into the
	 * mode.  The m:: bits take precedence over the g:: bits.
	 */
	va.va_mask = XFS_AT_MODE;
	error = xfs_getattr(xfs_vtoi(vp), &va, 0);
	if (error)
		return error;

	va.va_mask = XFS_AT_MODE;
	/* Clear all rwx bits; they are rebuilt from the ACL entries below. */
	va.va_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO);

	ap = acl->acl_entry;
	for (i = 0; i < acl->acl_cnt; ++i) {
		switch (ap->ae_tag) {
		case ACL_USER_OBJ:
			va.va_mode |= ap->ae_perm << 6;
			break;
		case ACL_GROUP_OBJ:
			/* Defer g:: until we know whether an m:: entry exists. */
			gap = ap;
			break;
		case ACL_MASK:	/* more than just standard modes */
			nomask = 0;
			va.va_mode |= ap->ae_perm << 3;
			*basicperms = 0;
			break;
		case ACL_OTHER:
			va.va_mode |= ap->ae_perm;
			break;
		default:	/* more than just standard modes */
			*basicperms = 0;
			break;
		}
		ap++;
	}

	/* Set the group bits from ACL_GROUP_OBJ if there's no ACL_MASK */
	if (gap && nomask)
		va.va_mode |= gap->ae_perm << 3;

	return xfs_setattr(xfs_vtoi(vp), &va, 0, sys_cred);
}
/*
 * Preallocate (or, with FALLOC_FL_PUNCH_HOLE, deallocate) the byte
 * range [offset, offset+len) of the file behind @file.  Implements the
 * VFS ->fallocate() contract: returns 0 or a negative errno.
 */
STATIC long
xfs_file_fallocate(
	struct file	*file,
	int		mode,
	loff_t		offset,
	loff_t		len)
{
	struct inode	*inode = file->f_path.dentry->d_inode;
	xfs_inode_t	*ip = XFS_I(inode);
	xfs_flock64_t	flock;
	loff_t		new_isize = 0;
	long		error;
	int		space_cmd = XFS_IOC_RESVSP;
	int		attr_flags = XFS_ATTR_NOLOCK;

	/* Reject any mode flags we do not understand. */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	flock.l_whence = 0;
	flock.l_start = offset;
	flock.l_len = len;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (mode & FALLOC_FL_PUNCH_HOLE)
		space_cmd = XFS_IOC_UNRESVSP;

	/* check the new inode size is valid before allocating */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + len > i_size_read(inode)) {
		new_isize = offset + len;
		error = inode_newsize_ok(inode, new_isize);
		if (error)
			goto out_unlock;
	}

	/* Honour O_DSYNC semantics for the space reservation. */
	if (file->f_flags & O_DSYNC)
		attr_flags |= XFS_ATTR_SYNC;

	error = -xfs_change_file_space(ip, space_cmd, &flock, 0, attr_flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_isize) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_isize;
		error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}
STATIC int xfs_vn_setattr( struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; unsigned int ia_valid = attr->ia_valid; bhv_vattr_t vattr = { 0 }; int flags = 0; int error; if (ia_valid & ATTR_UID) { vattr.va_mask |= XFS_AT_UID; vattr.va_uid = attr->ia_uid; } if (ia_valid & ATTR_GID) { vattr.va_mask |= XFS_AT_GID; vattr.va_gid = attr->ia_gid; } if (ia_valid & ATTR_SIZE) { vattr.va_mask |= XFS_AT_SIZE; vattr.va_size = attr->ia_size; } if (ia_valid & ATTR_ATIME) { vattr.va_mask |= XFS_AT_ATIME; vattr.va_atime = attr->ia_atime; inode->i_atime = attr->ia_atime; } if (ia_valid & ATTR_MTIME) { vattr.va_mask |= XFS_AT_MTIME; vattr.va_mtime = attr->ia_mtime; } if (ia_valid & ATTR_CTIME) { vattr.va_mask |= XFS_AT_CTIME; vattr.va_ctime = attr->ia_ctime; } if (ia_valid & ATTR_MODE) { vattr.va_mask |= XFS_AT_MODE; vattr.va_mode = attr->ia_mode; if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) inode->i_mode &= ~S_ISGID; } if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) flags |= ATTR_UTIME; #ifdef ATTR_NO_BLOCK if ((ia_valid & ATTR_NO_BLOCK)) flags |= ATTR_NONBLOCK; #endif error = xfs_setattr(XFS_I(inode), &vattr, flags, NULL); if (likely(!error)) vn_revalidate(vn_from_inode(inode)); return -error; }
/*
 * Preallocate blocks for the byte range [offset, offset+len) of
 * @inode.  Inode-based fallocate hook; returns 0 or a negative errno.
 */
STATIC long
xfs_vn_fallocate(
	struct inode	*inode,
	int		mode,
	loff_t		offset,
	loff_t		len)
{
	long		error;
	loff_t		new_size = 0;
	xfs_flock64_t	bf;
	xfs_inode_t	*ip = XFS_I(inode);

	/* preallocation on directories not yet supported */
	error = -ENODEV;
	if (S_ISDIR(inode->i_mode))
		goto out_error;

	bf.l_whence = 0;
	bf.l_start = offset;
	bf.l_len = len;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	/* check the new inode size is valid before allocating */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + len > i_size_read(inode)) {
		new_size = offset + len;
		error = inode_newsize_ok(inode, new_size);
		if (error)
			goto out_unlock;
	}

	/* XFS_ATTR_NOLOCK: we already hold the iolock; negate to errno. */
	error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
						0, XFS_ATTR_NOLOCK);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
out_error:
	return error;
}
/*
 * Apply @mode to @inode through the XFS setattr path, updating ctime
 * alongside it.  A no-op when the inode already carries that mode.
 * Returns 0 on success or a negative errno.
 */
static int
xfs_set_mode(struct inode *inode, mode_t mode)
{
	struct iattr	iattr;

	if (mode == inode->i_mode)
		return 0;

	iattr.ia_valid = ATTR_MODE | ATTR_CTIME;
	iattr.ia_mode = mode;
	iattr.ia_ctime = current_fs_time(inode->i_sb);

	/* XFS_ATTR_NOACL: the caller is applying ACL-derived bits itself. */
	return -xfs_setattr(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
}
/*
 * Preallocate blocks for the byte range [offset, offset+len) of
 * @inode.  Inode-based fallocate hook; returns 0 or a negative errno.
 *
 * Fix: xfs_change_file_space() and xfs_setattr() return positive XFS
 * error codes, but this function's other exit (-ENODEV) and the VFS
 * contract both use negative errnos.  The calls are now negated so a
 * failure is reported as a negative errno instead of a positive value
 * that callers would misinterpret as success/garbage.
 */
STATIC long
xfs_vn_fallocate(
	struct inode	*inode,
	int		mode,
	loff_t		offset,
	loff_t		len)
{
	long		error;
	loff_t		new_size = 0;
	xfs_flock64_t	bf;
	xfs_inode_t	*ip = XFS_I(inode);

	/* preallocation on directories not yet supported */
	error = -ENODEV;
	if (S_ISDIR(inode->i_mode))
		goto out_error;

	bf.l_whence = 0;
	bf.l_start = offset;
	bf.l_len = len;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	/* ATTR_NOLOCK: we already hold the iolock; negate to errno. */
	error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf, 0,
						NULL, ATTR_NOLOCK);
	/* Extend the file size only if the reservation succeeded. */
	if (!error && !(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + len > i_size_read(inode))
		new_size = offset + len;

	/* Change file size if needed */
	if (new_size) {
		bhv_vattr_t va;

		va.va_mask = XFS_AT_SIZE;
		va.va_size = new_size;
		error = -xfs_setattr(ip, &va, ATTR_NOLOCK, NULL);
	}

	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
out_error:
	return error;
}
/*
 * Preallocate (or, with FALLOC_FL_PUNCH_HOLE, deallocate) the byte
 * range [offset, offset+len) of @inode.  Inode-based fallocate hook;
 * returns 0 or a negative errno.
 */
STATIC long
xfs_vn_fallocate(
	struct inode	*inode,
	int		mode,
	loff_t		offset,
	loff_t		len)
{
	long		error;
	loff_t		new_size = 0;
	xfs_flock64_t	bf;
	xfs_inode_t	*ip = XFS_I(inode);
	int		cmd = XFS_IOC_RESVSP;
	int		attr_flags = XFS_ATTR_NOLOCK;

	/* Reject mode flags we do not implement. */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	/* preallocation on directories not yet supported */
	error = -ENODEV;
	if (S_ISDIR(inode->i_mode))
		goto out_error;

	bf.l_whence = 0;
	bf.l_start = offset;
	bf.l_len = len;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = XFS_IOC_UNRESVSP;

	/* check the new inode size is valid before allocating */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + len > i_size_read(inode)) {
		new_size = offset + len;
		error = inode_newsize_ok(inode, new_size);
		if (error)
			goto out_unlock;
	}

	/*
	 * RHEL6 porting note: mainline only does sync preallocations here on
	 * O_SYNC files as it is passed a filp and can check this. For RHEL6,
	 * just default to the old "always sync" behaviour as we cannot work
	 * out if we are operating in a sync context or not.
	 */
	attr_flags |= XFS_ATTR_SYNC;

	error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
out_error:
	return error;
}