STATIC struct inode *
xfs_nfs_get_inode(
	struct super_block	*sb,
	u64			ino,
	u32			generation)
{
	xfs_mount_t		*mp = XFS_M(sb);
	xfs_inode_t		*ip;
	int			error;

	/*
	 * NFS can sometimes send requests for ino 0.  Fail them gracefully.
	 */
	if (ino == 0)
		return ERR_PTR(-ESTALE);

	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
	if (error)
		return ERR_PTR(-error);
	if (!ip)
		return ERR_PTR(-EIO);

	if (ip->i_d.di_gen != generation) {
		xfs_iput_new(ip, XFS_ILOCK_SHARED);
		return ERR_PTR(-ENOENT);
	}

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return VFS_I(ip);
}
int
xfs_dir_lookup_int(
	bhv_desc_t	*dir_bdp,
	uint		lock_mode,
	vname_t		*dentry,
	xfs_ino_t	*inum,
	xfs_inode_t	**ipp)
{
	vnode_t		*dir_vp;
	xfs_inode_t	*dp;
	int		error;

	dir_vp = BHV_TO_VNODE(dir_bdp);
	vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);

	dp = XFS_BHVTOI(dir_bdp);

	error = XFS_DIR_LOOKUP(dp->i_mount, NULL, dp,
			       VNAME(dentry), VNAMELEN(dentry), inum);
	if (!error) {
		/*
		 * Unlock the directory. We do this because we can't
		 * hold the directory lock while doing the vn_get()
		 * in xfs_iget().  Doing so could cause us to hold
		 * a lock while waiting for the inode to finish
		 * being inactive while it's waiting for a log
		 * reservation in the inactive routine.
		 */
		xfs_iunlock(dp, lock_mode);
		error = xfs_iget(dp->i_mount, NULL, *inum, 0, 0, ipp, 0);
		xfs_ilock(dp, lock_mode);

		if (error) {
			*ipp = NULL;
		} else if ((*ipp)->i_d.di_mode == 0) {
			/*
			 * The inode has been freed.  Something is
			 * wrong so just get out of here.
			 */
			xfs_iunlock(dp, lock_mode);
			xfs_iput_new(*ipp, 0);
			*ipp = NULL;
			xfs_ilock(dp, lock_mode);
			error = XFS_ERROR(ENOENT);
		}
	}
	return error;
}
STATIC struct inode *
xfs_nfs_get_inode(
	struct super_block	*sb,
	u64			ino,
	u32			generation)
{
	xfs_mount_t		*mp = XFS_M(sb);
	xfs_inode_t		*ip;
	int			error;

	/*
	 * NFS can sometimes send requests for ino 0.  Fail them gracefully.
	 */
	if (ino == 0)
		return ERR_PTR(-ESTALE);

	/*
	 * The XFS_IGET_UNTRUSTED means that an invalid inode number is just
	 * fine and not an indication of a corrupted filesystem as clients can
	 * send invalid file handles and we have to handle it gracefully.
	 */
	error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED,
			 XFS_ILOCK_SHARED, &ip);
	if (error) {
		/*
		 * EINVAL means the inode cluster doesn't exist anymore.
		 * This implies the filehandle is stale, so we should
		 * translate it here.
		 * We don't use ESTALE directly down the chain to not
		 * confuse applications using bulkstat that expect EINVAL.
		 */
		if (error == EINVAL)
			error = ESTALE;
		return ERR_PTR(-error);
	}

	if (ip->i_d.di_gen != generation) {
		xfs_iput_new(ip, XFS_ILOCK_SHARED);
		return ERR_PTR(-ENOENT);
	}

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return VFS_I(ip);
}
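For context, here is a minimal sketch of how a helper like this is typically consumed by the exportfs fh_to_dentry hook. It assumes the post-2.6.28 API where d_obtain_alias() accepts both NULL and ERR_PTR()-encoded inodes, so the helper's return value can be passed straight through; xfs_fh_to_dentry_sketch is an illustrative name, not the real XFS hook, and only the 32-bit handle case is shown.

	/*
	 * Hypothetical, simplified export hook.  The inode (or
	 * ERR_PTR-encoded errno) from xfs_nfs_get_inode() is handed
	 * straight to d_obtain_alias(), which copes with both NULL
	 * and IS_ERR() inodes.
	 */
	STATIC struct dentry *
	xfs_fh_to_dentry_sketch(
		struct super_block	*sb,
		struct fid		*fid,
		int			fh_len,
		int			fh_type)
	{
		struct inode		*inode = NULL;

		if (fh_type == FILEID_INO32_GEN)
			inode = xfs_nfs_get_inode(sb, fid->i32.ino,
						  fid->i32.gen);

		return d_obtain_alias(inode);
	}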
int
xfs_dir_lookup_int(
	xfs_inode_t	*dp,
	uint		lock_mode,
	bhv_vname_t	*dentry,
	xfs_ino_t	*inum,
	xfs_inode_t	**ipp)
{
	int		error;

	xfs_itrace_entry(dp);

	error = xfs_dir_lookup(NULL, dp, VNAME(dentry), VNAMELEN(dentry), inum);
	if (!error) {
		/*
		 * Unlock the directory. We do this because we can't
		 * hold the directory lock while doing the vn_get()
		 * in xfs_iget().  Doing so could cause us to hold
		 * a lock while waiting for the inode to finish
		 * being inactive while it's waiting for a log
		 * reservation in the inactive routine.
		 */
		xfs_iunlock(dp, lock_mode);
		error = xfs_iget(dp->i_mount, NULL, *inum, 0, 0, ipp, 0);
		xfs_ilock(dp, lock_mode);

		if (error) {
			*ipp = NULL;
		} else if ((*ipp)->i_d.di_mode == 0) {
			/*
			 * The inode has been freed.  Something is
			 * wrong so just get out of here.
			 */
			xfs_iunlock(dp, lock_mode);
			xfs_iput_new(*ipp, 0);
			*ipp = NULL;
			xfs_ilock(dp, lock_mode);
			error = XFS_ERROR(ENOENT);
		}
	}
	return error;
}
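An abbreviated caller sketch, modeled on the xfs_lookup() caller of the same era, illustrates the locking contract. xfs_ilock_map_shared()/xfs_iunlock_map_shared() are the era's real directory-lock helpers; xfs_lookup_sketch is a hypothetical name and the body is trimmed to the essentials.

	/*
	 * Hypothetical, abbreviated caller.  Note that the callee
	 * cycles dp's lock around xfs_iget(), so nothing observed
	 * about dp while locked may be trusted across the call.
	 */
	STATIC int
	xfs_lookup_sketch(
		xfs_inode_t	*dp,
		bhv_vname_t	*dentry,
		xfs_inode_t	**ipp)
	{
		xfs_ino_t	e_inum;
		uint		lock_mode;
		int		error;

		lock_mode = xfs_ilock_map_shared(dp);
		error = xfs_dir_lookup_int(dp, lock_mode, dentry,
					   &e_inum, ipp);
		xfs_iunlock_map_shared(dp, lock_mode);

		return error;
	}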
STATIC int
xfs_qm_internalqcheck_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	void		*private_data,	/* not used */
	xfs_daddr_t	bno,		/* starting block of inode cluster */
	int		*ubused,	/* not used */
	void		*dip,		/* not used */
	int		*res)		/* bulkstat result code */
{
	xfs_inode_t	*ip;
	xfs_dqtest_t	*ud, *gd;
	uint		lock_flags;
	boolean_t	ipreleased;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		qdprintk("internalqcheck: ino=%llu, uqino=%llu, gqino=%llu\n",
			(unsigned long long) ino,
			(unsigned long long) mp->m_sb.sb_uquotino,
			(unsigned long long) mp->m_sb.sb_gquotino);
		return XFS_ERROR(EINVAL);
	}
	ipreleased = B_FALSE;
 again:
	lock_flags = XFS_ILOCK_SHARED;
	if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) {
		*res = BULKSTAT_RV_NOTHING;
		return (error);
	}

	if (ip->i_d.di_mode == 0) {
		xfs_iput_new(ip, lock_flags);
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(ENOENT);
	}

	/*
	 * This inode can have blocks after eof which can get released
	 * when we send it to inactive.  Since we don't check the dquot
	 * until after all our calculations are done, we must get rid
	 * of those now.
	 */
	if (! ipreleased) {
		xfs_iput(ip, lock_flags);
		ipreleased = B_TRUE;
		goto again;
	}
	xfs_qm_internalqcheck_get_dquots(mp,
					(xfs_dqid_t) ip->i_d.di_uid,
					(xfs_dqid_t) ip->i_d.di_projid,
					(xfs_dqid_t) ip->i_d.di_gid,
					&ud, &gd);
	if (XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ud);
		xfs_qm_internalqcheck_dqadjust(ip, ud);
	}
	if (XFS_IS_OQUOTA_ON(mp)) {
		ASSERT(gd);
		xfs_qm_internalqcheck_dqadjust(ip, gd);
	}
	xfs_iput(ip, lock_flags);
	*res = BULKSTAT_RV_DIDONE;
	return (0);
}
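The unused buffer/ubsize/private_data parameters exist only so the function fits the bulkstat callback shape. A sketch of the driver loop, modeled on xfs_qm_internalqcheck() of the same era, shows how such a callback is fed every allocated inode; the nine-argument xfs_bulkstat() signature and BULKSTAT_FG_IGET flag are era-specific assumptions, and xfs_qm_internalqcheck_sketch is a hypothetical name.

	/*
	 * Hypothetical driver loop: iterate over all inodes in the
	 * filesystem, letting bulkstat invoke the adjust callback
	 * above for each one until it reports it is done.
	 */
	STATIC int
	xfs_qm_internalqcheck_sketch(
		xfs_mount_t	*mp)
	{
		xfs_ino_t	lastino = 0;
		int		count = INT_MAX;
		int		done = 0;
		int		error = 0;

		do {
			error = xfs_bulkstat(mp, &lastino, &count,
					xfs_qm_internalqcheck_adjust, NULL,
					0, NULL, BULKSTAT_FG_IGET, &done);
		} while (!error && !done);

		return error;
	}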
/*
 * Convert userspace handle data into vnode (and inode).
 * We [ab]use the fact that all the fsop_handlereq ioctl calls
 * have a data structure argument whose first component is always
 * a xfs_fsop_handlereq_t, so we can cast to and from this type.
 * This allows us to optimise the copy_from_user calls and gives
 * a handy, shared routine.
 *
 * If no error, caller must always VN_RELE the returned vp.
 */
STATIC int
xfs_vget_fsop_handlereq(
	xfs_mount_t		*mp,
	struct inode		*parinode,	/* parent inode pointer    */
	int			cap,		/* capability level for op */
	unsigned long		arg,		/* userspace data pointer  */
	unsigned long		size,		/* size of expected struct */
	/* output arguments */
	xfs_fsop_handlereq_t	*hreq,
	vnode_t			**vp,
	struct inode		**inode)
{
	void			*hanp;
	size_t			hlen;
	xfs_fid_t		*xfid;
	xfs_handle_t		*handlep;
	xfs_handle_t		handle;
	xfs_inode_t		*ip;
	struct inode		*inodep;
	vnode_t			*vpp;
	xfs_ino_t		ino;
	__u32			igen;
	int			error;

	if (!capable(cap))
		return XFS_ERROR(EPERM);

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(parinode->i_mode))
		return XFS_ERROR(ENOTDIR);

	/*
	 * Copy the handle down from the user and validate
	 * that it looks to be in the correct format.
	 */
	if (copy_from_user(hreq, (struct xfs_fsop_handlereq *)arg, size))
		return XFS_ERROR(EFAULT);

	hanp = hreq->ihandle;
	hlen = hreq->ihandlen;
	handlep = &handle;

	if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
		return XFS_ERROR(EINVAL);
	if (copy_from_user(handlep, hanp, hlen))
		return XFS_ERROR(EFAULT);
	if (hlen < sizeof(*handlep))
		memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
	if (hlen > sizeof(handlep->ha_fsid)) {
		if (handlep->ha_fid.xfs_fid_len !=
				(hlen - sizeof(handlep->ha_fsid)
					- sizeof(handlep->ha_fid.xfs_fid_len))
		    || handlep->ha_fid.xfs_fid_pad)
			return XFS_ERROR(EINVAL);
	}

	/*
	 * Crack the handle, obtain the inode # & generation #
	 */
	xfid = (struct xfs_fid *)&handlep->ha_fid;
	if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) {
		ino = xfid->xfs_fid_ino;
		igen = xfid->xfs_fid_gen;
	} else {
		return XFS_ERROR(EINVAL);
	}

	/*
	 * Get the XFS inode, building a vnode to go with it.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
	if (error)
		return error;
	if (ip == NULL)
		return XFS_ERROR(EIO);
	if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
		xfs_iput_new(ip, XFS_ILOCK_SHARED);
		return XFS_ERROR(ENOENT);
	}

	vpp = XFS_ITOV(ip);
	inodep = LINVFS_GET_IP(vpp);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	*vp = vpp;
	*inode = inodep;
	return 0;
}
/*
 * Convert userspace handle data into inode.
 *
 * We use the fact that all the fsop_handlereq ioctl calls have a data
 * structure argument whose first component is always a xfs_fsop_handlereq_t,
 * so we can pass that sub structure into this handy, shared routine.
 *
 * If no error, caller must always iput the returned inode.
 */
STATIC int
xfs_vget_fsop_handlereq(
	xfs_mount_t		*mp,
	struct inode		*parinode,	/* parent inode pointer */
	xfs_fsop_handlereq_t	*hreq,
	struct inode		**inode)
{
	void			__user *hanp;
	size_t			hlen;
	xfs_fid_t		*xfid;
	xfs_handle_t		*handlep;
	xfs_handle_t		handle;
	xfs_inode_t		*ip;
	xfs_ino_t		ino;
	__u32			igen;
	int			error;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(parinode->i_mode))
		return XFS_ERROR(ENOTDIR);

	hanp = hreq->ihandle;
	hlen = hreq->ihandlen;
	handlep = &handle;

	if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
		return XFS_ERROR(EINVAL);
	if (copy_from_user(handlep, hanp, hlen))
		return XFS_ERROR(EFAULT);
	if (hlen < sizeof(*handlep))
		memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
	if (hlen > sizeof(handlep->ha_fsid)) {
		if (handlep->ha_fid.fid_len !=
		    (hlen - sizeof(handlep->ha_fsid) -
			    sizeof(handlep->ha_fid.fid_len)) ||
		    handlep->ha_fid.fid_pad)
			return XFS_ERROR(EINVAL);
	}

	/*
	 * Crack the handle, obtain the inode # & generation #
	 */
	xfid = (struct xfs_fid *)&handlep->ha_fid;
	if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) {
		ino = xfid->fid_ino;
		igen = xfid->fid_gen;
	} else {
		return XFS_ERROR(EINVAL);
	}

	/*
	 * Get the XFS inode, building a Linux inode to go with it.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
	if (error)
		return error;
	if (ip == NULL)
		return XFS_ERROR(EIO);
	if (ip->i_d.di_gen != igen) {
		xfs_iput_new(ip, XFS_ILOCK_SHARED);
		return XFS_ERROR(ENOENT);
	}

	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	*inode = VFS_I(ip);
	return 0;
}
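An abbreviated caller sketch, modeled on the *_by_handle ioctl helpers of this era, shows the reference-counting contract the comment above describes: a successful call hands back a referenced Linux inode, and every success must be balanced by iput(). xfs_by_handle_sketch is a hypothetical name; note the positive XFS_ERROR()-style errno is negated on the way back to the VFS.

	/*
	 * Hypothetical, abbreviated caller.  Any successful call to
	 * xfs_vget_fsop_handlereq() must be paired with iput().
	 */
	STATIC int
	xfs_by_handle_sketch(
		xfs_mount_t		*mp,
		xfs_fsop_handlereq_t	*hreq,
		struct inode		*parinode)
	{
		struct inode		*inode;
		int			error;

		error = xfs_vget_fsop_handlereq(mp, parinode, hreq, &inode);
		if (error)
			return -error;

		/* ... act on the inode here ... */

		iput(inode);
		return 0;
	}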