/*
 * Hook in SELinux.  This is not quite correct yet, what we really need
 * here (as we do for default ACLs) is a mechanism by which creation of
 * these attrs can be journalled at inode creation time (along with the
 * inode, of course, such that log replay can't cause these to be lost).
 */
STATIC int
xfs_init_security(
	bhv_vnode_t	*vp,
	struct inode	*dir)
{
	struct inode	*ip = vn_to_inode(vp);
	size_t		length;
	void		*value;
	char		*name;
	int		error;

	error = security_inode_init_security(ip, dir, &name, &value, &length);
	if (error) {
		if (error == -EOPNOTSUPP)
			return 0;
		return -error;
	}

	error = xfs_attr_set(XFS_I(ip), name, value, length, ATTR_SECURE);
	if (!error)
		xfs_iflags_set(XFS_I(ip), XFS_IMODIFIED);

	kfree(name);
	kfree(value);
	return error;
}
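/*
 * Editorial note (not kernel code): the core xfs_* routines in this
 * file return positive errnos, while the Linux VFS expects negative
 * ones, hence the "return -error" at each entry-point boundary.
 * security_inode_init_security() is a Linux API and already returns
 * negative errnos, which is why xfs_init_security() negates its result
 * to get back to the positive XFS-internal convention.
 */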
STATIC int
xfs_vn_symlink(
	struct inode	*dir,
	struct dentry	*dentry,
	const char	*symname)
{
	struct inode	*ip;
	bhv_vnode_t	*cvp;	/* used to lookup symlink to put in dentry */
	int		error;
	mode_t		mode;

	cvp = NULL;

	mode = S_IFLNK |
		(irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO);

	error = xfs_symlink(XFS_I(dir), dentry, (char *)symname, mode,
			    &cvp, NULL);
	if (likely(!error && cvp)) {
		error = xfs_init_security(cvp, dir);
		if (likely(!error)) {
			ip = vn_to_inode(cvp);
			d_instantiate(dentry, ip);
			xfs_validate_fields(dir);
			xfs_validate_fields(ip);
		} else {
			xfs_cleanup_inode(dir, cvp, dentry, 0);
		}
	}
	return -error;
}
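/*
 * Worked example for the mode computation above (editorial note): with
 * irix_symlink_mode set and a process umask of 022, the symlink mode is
 * S_IFLNK | (0777 & ~0022) = S_IFLNK | 0755.  With irix_symlink_mode
 * clear it is always S_IFLNK | S_IRWXUGO (0777), the usual Linux
 * behaviour for symlinks.
 */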
/*
 * If the linux inode exists, mark it dirty.
 * Used when committing a dirty inode into a transaction so that
 * the inode will get written back by the linux code.
 */
void
xfs_mark_inode_dirty_sync(
	xfs_inode_t	*ip)
{
	bhv_vnode_t	*vp;

	vp = XFS_ITOV_NULL(ip);
	if (vp)
		mark_inode_dirty_sync(vn_to_inode(vp));
}
/*
 * Add a reference to a referenced vnode.
 */
bhv_vnode_t *
vn_hold(
	bhv_vnode_t	*vp)
{
	struct inode	*inode;

	XFS_STATS_INC(vn_hold);

	inode = igrab(vn_to_inode(vp));
	ASSERT(inode);

	return vp;
}
/*
 * Revalidate the Linux inode from the XFS inode.
 * Note: i_size _not_ updated; we must hold the inode
 * semaphore when doing that - caller's responsibility.
 */
int
vn_revalidate(
	bhv_vnode_t	*vp)
{
	struct inode	*inode = vn_to_inode(vp);
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	unsigned long	xflags;

	xfs_itrace_entry(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	inode->i_mode = ip->i_d.di_mode;
	inode->i_uid = ip->i_d.di_uid;
	inode->i_gid = ip->i_d.di_gid;
	inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
	inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
	inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
	inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;

	xflags = xfs_ip2xflags(ip);
	if (xflags & XFS_XFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (xflags & XFS_XFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (xflags & XFS_XFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (xflags & XFS_XFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	xfs_iflags_clear(ip, XFS_IMODIFIED);
	return 0;
}
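/*
 * Illustrative sketch (not kernel code): the if/else chains above pair
 * each XFS xflag with a generic inode flag.  A table-driven equivalent
 * makes the mapping explicit.  The constants are hypothetical stand-ins
 * for the kernel definitions, chosen only so this compiles standalone.
 */
#include <stdio.h>

#define DEMO_XFLAG_IMMUTABLE	0x008	/* stand-in for XFS_XFLAG_IMMUTABLE */
#define DEMO_XFLAG_APPEND	0x010	/* stand-in for XFS_XFLAG_APPEND */
#define DEMO_S_IMMUTABLE	0x008	/* stand-in for S_IMMUTABLE */
#define DEMO_S_APPEND		0x020	/* stand-in for S_APPEND */

static const struct { unsigned xflag; unsigned iflag; } flag_map[] = {
	{ DEMO_XFLAG_IMMUTABLE,	DEMO_S_IMMUTABLE },
	{ DEMO_XFLAG_APPEND,	DEMO_S_APPEND },
};

static unsigned sync_iflags(unsigned xflags, unsigned iflags)
{
	size_t	i;

	for (i = 0; i < sizeof(flag_map) / sizeof(flag_map[0]); i++) {
		if (xflags & flag_map[i].xflag)
			iflags |= flag_map[i].iflag;
		else
			iflags &= ~flag_map[i].iflag;
	}
	return iflags;
}

int main(void)
{
	/* prints 0x20: APPEND gets set, stale IMMUTABLE gets cleared */
	printf("0x%x\n", sync_iflags(DEMO_XFLAG_APPEND, DEMO_S_IMMUTABLE));
	return 0;
}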
STATIC struct dentry *
xfs_fs_get_parent(
	struct dentry	*child)
{
	int		error;
	bhv_vnode_t	*cvp;
	struct dentry	*parent;

	cvp = NULL;
	error = xfs_lookup(XFS_I(child->d_inode), &dotdot, &cvp);
	if (unlikely(error))
		return ERR_PTR(-error);

	parent = d_alloc_anon(vn_to_inode(cvp));
	if (unlikely(!parent)) {
		VN_RELE(cvp);
		return ERR_PTR(-ENOMEM);
	}
	return parent;
}
STATIC struct dentry *
xfs_vn_lookup(
	struct inode	*dir,
	struct dentry	*dentry,
	struct nameidata *nd)
{
	bhv_vnode_t	*cvp;
	int		error;

	if (dentry->d_name.len >= MAXNAMELEN)
		return ERR_PTR(-ENAMETOOLONG);

	error = xfs_lookup(XFS_I(dir), dentry, &cvp);
	if (unlikely(error)) {
		if (unlikely(error != ENOENT))
			return ERR_PTR(-error);
		d_add(dentry, NULL);
		return NULL;
	}

	return d_splice_alias(vn_to_inode(cvp), dentry);
}
/*
 * Change the requested timestamp in the given inode.
 * We don't lock across timestamp updates, and we don't log them but
 * we do record the fact that there is dirty information in core.
 *
 * NOTE -- callers MUST combine XFS_ICHGTIME_MOD or XFS_ICHGTIME_CHG
 * with XFS_ICHGTIME_ACC to be sure that the access time
 * update will take.  Calling first with XFS_ICHGTIME_ACC
 * and then XFS_ICHGTIME_MOD may fail to modify the access
 * timestamp if the filesystem is mounted noacctm.
 */
void
xfs_ichgtime(
	xfs_inode_t	*ip,
	int		flags)
{
	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
	timespec_t	tv;

	nanotime(&tv);
	if (flags & XFS_ICHGTIME_MOD) {
		inode->i_mtime = tv;
		ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
		ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
	}
	if (flags & XFS_ICHGTIME_ACC) {
		inode->i_atime = tv;
		ip->i_d.di_atime.t_sec = (__int32_t)tv.tv_sec;
		ip->i_d.di_atime.t_nsec = (__int32_t)tv.tv_nsec;
	}
	if (flags & XFS_ICHGTIME_CHG) {
		inode->i_ctime = tv;
		ip->i_d.di_ctime.t_sec = (__int32_t)tv.tv_sec;
		ip->i_d.di_ctime.t_nsec = (__int32_t)tv.tv_nsec;
	}

	/*
	 * We update the i_update_core field _after_ changing
	 * the timestamps in order to coordinate properly with
	 * xfs_iflush() so that we don't lose timestamp updates.
	 * This keeps us from having to hold the inode lock
	 * while doing this.  We use the SYNCHRONIZE macro to
	 * ensure that the compiler does not reorder the update
	 * of i_update_core above the timestamp updates above.
	 */
	SYNCHRONIZE();
	ip->i_update_core = 1;
	if (!(inode->i_state & I_NEW))
		mark_inode_dirty_sync(inode);
}
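/*
 * Illustrative sketch (not kernel code): the SYNCHRONIZE() above acts
 * as a barrier so that a concurrent flusher that observes
 * i_update_core == 1 is guaranteed to also see the new timestamps.
 * A user-space analogue of the same pattern, using C11 release/acquire
 * atomics; field names mirror the kernel code but are hypothetical.
 */
#include <stdatomic.h>

struct demo_inode {
	long		t_sec;		/* "timestamp" payload */
	atomic_int	update_core;	/* dirty flag checked by the flusher */
};

static void demo_chgtime(struct demo_inode *ip, long now)
{
	ip->t_sec = now;		/* plain store of the payload */
	/* release: the payload store becomes visible before the flag */
	atomic_store_explicit(&ip->update_core, 1, memory_order_release);
}

static long demo_flush(struct demo_inode *ip)
{
	/* acquire pairs with the release above */
	if (atomic_load_explicit(&ip->update_core, memory_order_acquire))
		return ip->t_sec;	/* guaranteed to see the new value */
	return -1;
}

int main(void)
{
	struct demo_inode ino = { 0, 0 };

	demo_chgtime(&ino, 1234567890L);
	return demo_flush(&ino) == 1234567890L ? 0 : 1;
}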
STATIC void
xfs_cleanup_inode(
	struct inode	*dir,
	bhv_vnode_t	*vp,
	struct dentry	*dentry,
	int		mode)
{
	struct dentry	teardown = {};

	/* Oh, the horror.
	 * If we can't add the ACL or we fail in
	 * xfs_init_security we must back out.
	 * ENOSPC can hit here, among other things.
	 */
	teardown.d_inode = vn_to_inode(vp);
	teardown.d_name = dentry->d_name;

	if (S_ISDIR(mode))
		xfs_rmdir(XFS_I(dir), &teardown);
	else
		xfs_remove(XFS_I(dir), &teardown);
	VN_RELE(vp);
}
int					/* error (positive) */
xfs_zero_eof(
	vnode_t		*vp,
	xfs_iocore_t	*io,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize,		/* current inode size */
	xfs_fsize_t	end_size)	/* terminal inode size */
{
	struct inode	*ip = vn_to_inode(vp);
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_extlen_t	buf_len_fsb;
	xfs_mount_t	*mp = io->io_mount;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
	ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, io, isize, end_size);
	if (error) {
		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = XFS_BMAPI(mp, NULL, io, start_zero_fsb,
				  zero_count_fsb, 0, NULL, 0, &imap,
				  &nimaps, NULL);
		if (error) {
			ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
			ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop. It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks in the range requested.
		 * Zero them a single write at a time.  We actually
		 * don't zero the entire range returned if it is
		 * too big and simply loop around to get the rest.
		 * That is not the most efficient thing to do, but it
		 * is simple and this path should not be exercised often.
		 */
		buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
					      mp->m_writeio_blocks << 8);

		/*
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

		error = xfs_iozero(ip,
				   XFS_FSB_TO_B(mp, start_zero_fsb),
				   XFS_FSB_TO_B(mp, buf_len_fsb),
				   end_size);
		if (error) {
			goto out_lock;
		}

		start_zero_fsb = imap.br_startoff + buf_len_fsb;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	}

	return 0;

out_lock:
	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}
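/*
 * Illustrative sketch (not kernel code): the byte-to-block conversions
 * above differ in rounding.  XFS_B_TO_FSB rounds up to the next
 * filesystem block; XFS_B_TO_FSBT truncates.  That is why isize - 1 is
 * truncated to find the last in-use block, while isize is rounded up to
 * find the first block wholly beyond EOF.  The helpers below are
 * hypothetical stand-ins assuming a power-of-two block size.
 */
#include <assert.h>
#include <stdint.h>

#define BLOCKLOG	12	/* assume 4096-byte filesystem blocks */

static uint64_t b_to_fsb(uint64_t bytes)	/* round up, like XFS_B_TO_FSB */
{
	return (bytes + (1ULL << BLOCKLOG) - 1) >> BLOCKLOG;
}

static uint64_t b_to_fsbt(uint64_t bytes)	/* truncate, like XFS_B_TO_FSBT */
{
	return bytes >> BLOCKLOG;
}

int main(void)
{
	/* size exactly on a block boundary: last byte is in block 0,
	 * first block to zero is block 1 */
	assert(b_to_fsbt(4096 - 1) == 0 && b_to_fsb(4096) == 1);
	/* size one byte into block 1: last byte is in block 1,
	 * first block to zero is block 2 */
	assert(b_to_fsbt(4097 - 1) == 1 && b_to_fsb(4097) == 2);
	return 0;
}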
/*
 * Convert userspace handle data into vnode (and inode).
 * We [ab]use the fact that all the fsop_handlereq ioctl calls
 * have a data structure argument whose first component is always
 * a xfs_fsop_handlereq_t, so we can cast to and from this type.
 * This allows us to optimise the copy_from_user calls and gives
 * a handy, shared routine.
 *
 * If no error, caller must always VN_RELE the returned vp.
 */
STATIC int
xfs_vget_fsop_handlereq(
	xfs_mount_t		*mp,
	struct inode		*parinode,	/* parent inode pointer */
	xfs_fsop_handlereq_t	*hreq,
	vnode_t			**vp,
	struct inode		**inode)
{
	void			__user *hanp;
	size_t			hlen;
	xfs_fid_t		*xfid;
	xfs_handle_t		*handlep;
	xfs_handle_t		handle;
	xfs_inode_t		*ip;
	struct inode		*inodep;
	vnode_t			*vpp;
	xfs_ino_t		ino;
	__u32			igen;
	int			error;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(parinode->i_mode))
		return XFS_ERROR(ENOTDIR);

	hanp = hreq->ihandle;
	hlen = hreq->ihandlen;
	handlep = &handle;

	if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
		return XFS_ERROR(EINVAL);
	if (copy_from_user(handlep, hanp, hlen))
		return XFS_ERROR(EFAULT);
	if (hlen < sizeof(*handlep))
		memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
	if (hlen > sizeof(handlep->ha_fsid)) {
		if (handlep->ha_fid.xfs_fid_len !=
		    (hlen - sizeof(handlep->ha_fsid) -
			    sizeof(handlep->ha_fid.xfs_fid_len)) ||
		    handlep->ha_fid.xfs_fid_pad)
			return XFS_ERROR(EINVAL);
	}

	/*
	 * Crack the handle, obtain the inode # & generation #
	 */
	xfid = (struct xfs_fid *)&handlep->ha_fid;
	if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) {
		ino = xfid->xfs_fid_ino;
		igen = xfid->xfs_fid_gen;
	} else {
		return XFS_ERROR(EINVAL);
	}

	/*
	 * Get the XFS inode, building a vnode to go with it.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
	if (error)
		return error;
	if (ip == NULL)
		return XFS_ERROR(EIO);
	if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
		xfs_iput_new(ip, XFS_ILOCK_SHARED);
		return XFS_ERROR(ENOENT);
	}

	vpp = XFS_ITOV(ip);
	inodep = vn_to_inode(vpp);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	*vp = vpp;
	*inode = inodep;
	return 0;
}
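/*
 * Illustrative sketch (not kernel code): the handle-length validation
 * above boils down to "the fid length recorded in the handle must equal
 * the bytes actually copied in, minus the fsid and the length field
 * itself, and the pad must be zero".  The struct layout here is a
 * simplified stand-in for xfs_handle_t, only so this compiles standalone.
 */
#include <stddef.h>
#include <stdint.h>

struct demo_fsid { uint32_t val[2]; };

struct demo_fid {
	uint16_t	len;	/* length of (pad + gen + ino), not of 'len' */
	uint16_t	pad;	/* must be zero */
	uint32_t	gen;
	uint64_t	ino;
};

struct demo_handle {
	struct demo_fsid	fsid;
	struct demo_fid		fid;
};

static int demo_handle_ok(const struct demo_handle *h, size_t hlen)
{
	if (hlen < sizeof(h->fsid) || hlen > sizeof(*h))
		return 0;	/* EINVAL in the real code */
	if (hlen > sizeof(h->fsid) &&
	    (h->fid.len != hlen - sizeof(h->fsid) - sizeof(h->fid.len) ||
	     h->fid.pad != 0))
		return 0;
	return 1;
}

int main(void)
{
	struct demo_handle h = { { { 1, 2 } },
		{ sizeof(h.fid) - sizeof(h.fid.len), 0, 42, 128 } };

	return demo_handle_ok(&h, sizeof(h)) ? 0 : 1;
}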
STATIC int
xfs_ioc_xattr(
	vnode_t			*vp,
	xfs_inode_t		*ip,
	struct file		*filp,
	unsigned int		cmd,
	void			__user *arg)
{
	struct fsxattr		fa;
	struct vattr		*vattr;
	int			error = 0;
	int			attr_flags;
	unsigned int		flags;

	vattr = kmalloc(sizeof(*vattr), GFP_KERNEL);
	if (unlikely(!vattr))
		return -ENOMEM;

	switch (cmd) {
	case XFS_IOC_FSGETXATTR: {
		vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE |
				 XFS_AT_NEXTENTS | XFS_AT_PROJID;
		VOP_GETATTR(vp, vattr, 0, NULL, error);
		if (unlikely(error)) {
			error = -error;
			break;
		}

		fa.fsx_xflags	= vattr->va_xflags;
		fa.fsx_extsize	= vattr->va_extsize;
		fa.fsx_nextents	= vattr->va_nextents;
		fa.fsx_projid	= vattr->va_projid;

		if (copy_to_user(arg, &fa, sizeof(fa))) {
			error = -EFAULT;
			break;
		}
		break;
	}

	case XFS_IOC_FSSETXATTR: {
		if (copy_from_user(&fa, arg, sizeof(fa))) {
			error = -EFAULT;
			break;
		}

		attr_flags = 0;
		if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
			attr_flags |= ATTR_NONBLOCK;

		vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE | XFS_AT_PROJID;
		vattr->va_xflags  = fa.fsx_xflags;
		vattr->va_extsize = fa.fsx_extsize;
		vattr->va_projid  = fa.fsx_projid;

		VOP_SETATTR(vp, vattr, attr_flags, NULL, error);
		if (likely(!error))
			__vn_revalidate(vp, vattr);	/* update flags */
		error = -error;
		break;
	}

	case XFS_IOC_FSGETXATTRA: {
		vattr->va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE |
				 XFS_AT_ANEXTENTS | XFS_AT_PROJID;
		VOP_GETATTR(vp, vattr, 0, NULL, error);
		if (unlikely(error)) {
			error = -error;
			break;
		}

		fa.fsx_xflags	= vattr->va_xflags;
		fa.fsx_extsize	= vattr->va_extsize;
		fa.fsx_nextents	= vattr->va_anextents;
		fa.fsx_projid	= vattr->va_projid;

		if (copy_to_user(arg, &fa, sizeof(fa))) {
			error = -EFAULT;
			break;
		}
		break;
	}

	case XFS_IOC_GETXFLAGS: {
		flags = xfs_di2lxflags(ip->i_d.di_flags);
		if (copy_to_user(arg, &flags, sizeof(flags)))
			error = -EFAULT;
		break;
	}

	case XFS_IOC_SETXFLAGS: {
		if (copy_from_user(&flags, arg, sizeof(flags))) {
			error = -EFAULT;
			break;
		}

		if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND |
			      LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP |
			      LINUX_XFLAG_SYNC)) {
			error = -EOPNOTSUPP;
			break;
		}

		attr_flags = 0;
		if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
			attr_flags |= ATTR_NONBLOCK;

		vattr->va_mask = XFS_AT_XFLAGS;
		vattr->va_xflags = xfs_merge_ioc_xflags(flags,
							xfs_ip2xflags(ip));

		VOP_SETATTR(vp, vattr, attr_flags, NULL, error);
		if (likely(!error))
			__vn_revalidate(vp, vattr);	/* update flags */
		error = -error;
		break;
	}

	case XFS_IOC_GETVERSION: {
		flags = vn_to_inode(vp)->i_generation;
		if (copy_to_user(arg, &flags, sizeof(flags)))
			error = -EFAULT;
		break;
	}

	default:
		error = -ENOTTY;
		break;
	}

	kfree(vattr);
	return error;
}
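/*
 * Illustrative user-space sketch: exercising the XFS_IOC_FSGETXATTR
 * path above via ioctl(2).  Assumes the xfsprogs header <xfs/xfs_fs.h>
 * is available for struct fsxattr and the ioctl number; error handling
 * is kept minimal.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>

int main(int argc, char **argv)
{
	struct fsxattr	fa;
	int		fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

	if (fd < 0 || ioctl(fd, XFS_IOC_FSGETXATTR, &fa) < 0) {
		perror("XFS_IOC_FSGETXATTR");
		return 1;
	}
	printf("xflags 0x%x extsize %u projid %u\n",
	       fa.fsx_xflags, fa.fsx_extsize, fa.fsx_projid);
	close(fd);
	return 0;
}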
STATIC int
xfs_vn_mknod(
	struct inode	*dir,
	struct dentry	*dentry,
	int		mode,
	dev_t		rdev)
{
	struct inode	*ip;
	bhv_vnode_t	*vp = NULL, *dvp = vn_from_inode(dir);
	xfs_acl_t	*default_acl = NULL;
	attrexists_t	test_default_acl = _ACL_DEFAULT_EXISTS;
	int		error;

	/*
	 * Irix uses Missed'em'V split, but doesn't want to see
	 * the upper 5 bits of (14bit) major.
	 */
	if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff))
		return -EINVAL;

	if (unlikely(test_default_acl && test_default_acl(dvp))) {
		if (!_ACL_ALLOC(default_acl)) {
			return -ENOMEM;
		}
		if (!_ACL_GET_DEFAULT(dvp, default_acl)) {
			_ACL_FREE(default_acl);
			default_acl = NULL;
		}
	}

	if (IS_POSIXACL(dir) && !default_acl && xfs_has_fs_struct(current))
		mode &= ~current->fs->umask;

	switch (mode & S_IFMT) {
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		rdev = sysv_encode_dev(rdev);
		/* fall through to the regular create path */
	case S_IFREG:
		error = xfs_create(XFS_I(dir), dentry, mode, rdev, &vp, NULL);
		break;
	case S_IFDIR:
		error = xfs_mkdir(XFS_I(dir), dentry, mode, &vp, NULL);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (unlikely(!error)) {
		error = xfs_init_security(vp, dir);
		if (error)
			xfs_cleanup_inode(dir, vp, dentry, mode);
	}

	if (unlikely(default_acl)) {
		if (!error) {
			error = _ACL_INHERIT(vp, mode, default_acl);
			if (!error)
				xfs_iflags_set(XFS_I(vp), XFS_IMODIFIED);
			else
				xfs_cleanup_inode(dir, vp, dentry, mode);
		}
		_ACL_FREE(default_acl);
	}

	if (likely(!error)) {
		ASSERT(vp);
		ip = vn_to_inode(vp);

		if (S_ISDIR(mode))
			xfs_validate_fields(ip);
		d_instantiate(dentry, ip);
		xfs_validate_fields(dir);
	}
	return -error;
}
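/*
 * Illustrative sketch (not kernel code): the rdev check above rejects
 * major numbers wider than 9 bits (~0x1ff masks bits 9..13 of the
 * 14-bit System V major field).  The helpers below mirror the kernel's
 * sysv_valid_dev()/sysv_encode_dev() split of 14-bit major and 18-bit
 * minor; the DEMO_MAJOR/DEMO_MINOR macros are simplified stand-ins for
 * the kernel's MAJOR/MINOR on a 32-bit dev_t.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_MINORBITS	20	/* Linux dev_t: 12-bit major, 20-bit minor */
#define DEMO_MAJOR(dev)	((unsigned)((dev) >> DEMO_MINORBITS))
#define DEMO_MINOR(dev)	((unsigned)((dev) & ((1U << DEMO_MINORBITS) - 1)))

static int demo_sysv_valid_dev(uint32_t dev)
{
	return DEMO_MAJOR(dev) < (1 << 14) && DEMO_MINOR(dev) < (1 << 18);
}

static uint32_t demo_sysv_encode_dev(uint32_t dev)
{
	return DEMO_MINOR(dev) | (DEMO_MAJOR(dev) << 18);
}

int main(void)
{
	uint32_t dev = (8U << DEMO_MINORBITS) | 1;	/* major 8, minor 1 */

	/* passes both the sysv range check and the 9-bit major check */
	assert(demo_sysv_valid_dev(dev) && !(DEMO_MAJOR(dev) & ~0x1ffU));
	assert(demo_sysv_encode_dev(dev) == ((8U << 18) | 1));
	return 0;
}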