/*
 * Truncate the user and/or group/project quota files named by @flags.
 *
 * Each quota file is handled independently: a failure on the user quota
 * file does not prevent an attempt on the group/project file.  The first
 * error encountered wins (error, then error2), so a user-quota failure is
 * reported even when the group truncation succeeds.
 *
 * Returns 0 on success, EINVAL if quota is not enabled on-disk or @flags
 * is empty, otherwise the first xfs_iget()/xfs_truncate_file() error.
 */
int xfs_qm_scall_trunc_qfiles( xfs_mount_t *mp, uint flags) { int error = 0, error2 = 0; xfs_inode_t *qip; if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags); return XFS_ERROR(EINVAL); } if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) { error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0); if (!error) { error = xfs_truncate_file(mp, qip); IRELE(qip); } } if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) && mp->m_sb.sb_gquotino != NULLFSINO) { error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0); if (!error2) { error2 = xfs_truncate_file(mp, qip); IRELE(qip); } } return error ? error : error2; }
/*
 * Truncate the user and/or group/project quota files named by @flags.
 *
 * Fixes relative to the previous version of this routine:
 *  - the error from the user-quota pass is no longer clobbered by the
 *    group/project pass; each pass keeps its own error and the first
 *    failure is reported (matching the non-STATIC sibling of this
 *    function in this file);
 *  - the result of xfs_truncate_file() is no longer discarded, so a
 *    failed truncation is no longer silently reported as success.
 *
 * Returns 0 on success, EPERM without CAP_SYS_ADMIN, EINVAL if quota is
 * not enabled on-disk or @flags is empty, otherwise the first error.
 */
STATIC int
xfs_qm_scall_trunc_qfiles(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error = 0, error2 = 0;
	xfs_inode_t	*qip;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);
	if (!XFS_SB_VERSION_HASQUOTA(&mp->m_sb) || flags == 0) {
		qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
		return XFS_ERROR(EINVAL);
	}

	if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) {
		error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0);
		if (!error) {
			error = xfs_truncate_file(mp, qip);
			VN_RELE(XFS_ITOV(qip));
		}
	}

	if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) &&
	    mp->m_sb.sb_gquotino != NULLFSINO) {
		error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0);
		if (!error2) {
			error2 = xfs_truncate_file(mp, qip);
			VN_RELE(XFS_ITOV(qip));
		}
	}

	/* Report the first failure; user-quota errors take precedence. */
	return error ? error : error2;
}
/*
 * NFS export operation: turn an (ino, generation) pair from a file handle
 * into a VFS inode.
 *
 * Ino 0 is rejected up front (NFS can send it).  XFS_IGET_UNTRUSTED makes
 * xfs_iget() validate the inode number, since clients can hand us stale or
 * corrupt handles; EINVAL/ENOENT from the lookup are mapped to ESTALE so
 * the client re-resolves the handle.  A generation mismatch means the
 * inode was reused since the handle was issued, which is also stale.
 *
 * Returns the VFS inode with a reference held, or an ERR_PTR.
 */
STATIC struct inode * xfs_nfs_get_inode( struct super_block *sb, u64 ino, u32 generation) { xfs_mount_t *mp = XFS_M(sb); xfs_inode_t *ip; int error; if (ino == 0) return ERR_PTR(-ESTALE); error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 0, &ip); if (error) { if (error == EINVAL || error == ENOENT) error = ESTALE; return ERR_PTR(-error); } if (ip->i_d.di_gen != generation) { IRELE(ip); return ERR_PTR(-ESTALE); } return VFS_I(ip); }
/*
 * NFS export operation: turn an (ino, generation) pair from a file handle
 * into a VFS inode (older variant that takes XFS_ILOCK_SHARED during the
 * lookup and drops it before returning).
 *
 * A generation mismatch releases the new reference via xfs_iput_new() and
 * returns -ENOENT.  NOTE(review): later versions of this routine return
 * -ESTALE for a stale generation and also pass XFS_IGET_UNTRUSTED to
 * validate client-supplied inode numbers — confirm whether ENOENT here is
 * intentional for this era before changing it.
 *
 * Returns the VFS inode with a reference held, or an ERR_PTR.
 */
STATIC struct inode * xfs_nfs_get_inode( struct super_block *sb, u64 ino, u32 generation) { xfs_mount_t *mp = XFS_M(sb); xfs_inode_t *ip; int error; /* * NFS can sometimes send requests for ino 0. Fail them gracefully. */ if (ino == 0) return ERR_PTR(-ESTALE); error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); if (error) return ERR_PTR(-error); if (!ip) return ERR_PTR(-EIO); if (ip->i_d.di_gen != generation) { xfs_iput_new(ip, XFS_ILOCK_SHARED); return ERR_PTR(-ENOENT); } xfs_iunlock(ip, XFS_ILOCK_SHARED); return VFS_I(ip); }
/*
 * Look up an XFS inode by (dev, inode number) and return it locked shared.
 *
 * If @vfsp is NULL the vfs is located from @dev via vfs_devsearch(); a
 * miss there is ENXIO.  On success *@ipp holds the inode, returned with
 * XFS_ILOCK_SHARED held by xfs_iget().  Error sites are tagged with
 * SET_XFS_ERROR for diagnostics.
 */
int
xfs_getinode(struct vfs *vfsp, dev_t dev, ino_t inode, struct xfs_inode **ipp)
{
	struct xfs_inode	*ip;
	int			error;

	/* No vfs supplied: resolve it from the device number. */
	if (!vfsp) {
		vfsp = vfs_devsearch(dev, xfs_fstype);
		if (!vfsp) {
			SET_XFS_ERROR(1, dev, inode);
			return ENXIO;
		}
	}

	/* The mount structure hangs off the first behavior descriptor. */
	error = xfs_iget((((struct mount *)
			   ((vfsp)->vfs_bh.bh_first)->bd_pdata)),
			 (void *)0, (xfs_ino_t) inode, XFS_ILOCK_SHARED,
			 &ip, (daddr_t) 0);
	if (error) {
		SET_XFS_ERROR(3, vfsp->vfs_dev, inode);
		return error;
	}

	*ipp = ip;
	return 0;
}
/*
 * Truncate a single quota file (by inode number) back to zero length.
 *
 * NULLFSINO is a no-op.  Locking order: the iolock is taken before the
 * transaction is allocated/reserved, the ilock only after the reservation
 * succeeds — the usual XFS rule that the ilock must not be held across a
 * log reservation.  On the extent-removal error path the transaction is
 * cancelled with ABORT since it is dirty by then.
 *
 * Returns 0 on success or a negative/positive errno from iget, the
 * reservation, extent removal, or the commit.
 */
STATIC int xfs_qm_scall_trunc_qfile( struct xfs_mount *mp, xfs_ino_t ino) { struct xfs_inode *ip; struct xfs_trans *tp; int error; if (ino == NULLFSINO) return 0; error = xfs_iget(mp, NULL, ino, 0, 0, &ip); if (error) return error; xfs_ilock(ip, XFS_IOLOCK_EXCL); tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE); error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT); if (error) { xfs_trans_cancel(tp, 0); xfs_iunlock(ip, XFS_IOLOCK_EXCL); goto out_put; } xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, 0); ip->i_d.di_size = 0; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0); if (error) { xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); goto out_unlock; } ASSERT(ip->i_d.di_nextents == 0); xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); out_unlock: xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); out_put: IRELE(ip); return error; }
/*
 * Look up @dentry in directory @dir_bdp and return the target inode.
 *
 * On a directory hit, the directory's lock (@lock_mode, already held by
 * the caller) is dropped around the xfs_iget() of the target — holding it
 * across iget could deadlock against inode inactivation waiting for log
 * space (see the inline comment) — and retaken before returning, so the
 * caller's locking state is unchanged.  A target whose di_mode is 0 was
 * freed under us; it is released and ENOENT is returned.
 *
 * Returns 0 with *inum/*ipp set, or an error with *ipp NULL.
 */
int xfs_dir_lookup_int( bhv_desc_t *dir_bdp, uint lock_mode, vname_t *dentry, xfs_ino_t *inum, xfs_inode_t **ipp) { vnode_t *dir_vp; xfs_inode_t *dp; int error; dir_vp = BHV_TO_VNODE(dir_bdp); vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address); dp = XFS_BHVTOI(dir_bdp); error = XFS_DIR_LOOKUP(dp->i_mount, NULL, dp, VNAME(dentry), VNAMELEN(dentry), inum); if (!error) { /* * Unlock the directory. We do this because we can't * hold the directory lock while doing the vn_get() * in xfs_iget(). Doing so could cause us to hold * a lock while waiting for the inode to finish * being inactive while it's waiting for a log * reservation in the inactive routine. */ xfs_iunlock(dp, lock_mode); error = xfs_iget(dp->i_mount, NULL, *inum, 0, 0, ipp, 0); xfs_ilock(dp, lock_mode); if (error) { *ipp = NULL; } else if ((*ipp)->i_d.di_mode == 0) { /* * The inode has been freed. Something is * wrong so just get out of here. */ xfs_iunlock(dp, lock_mode); xfs_iput_new(*ipp, 0); *ipp = NULL; xfs_ilock(dp, lock_mode); error = XFS_ERROR(ENOENT); } } return error; }
/*
 * @mp:    mount to search
 * @tp:    transaction context for the in-core lookup (may be NULL)
 * @ino:   inode number to probe
 * @inuse: out: true if the cached inode has a nonzero i_mode (allocated)
 *
 * XFS_IGET_INCORE restricts xfs_iget() to the cache — it never touches
 * disk — which is what gives this routine its -EAGAIN/-ENOENT contract.
 */
/* * "Is this a cached inode that's also allocated?" * * Look up an inode by number in the given file system. If the inode is * in cache and isn't in purgatory, return 1 if the inode is allocated * and 0 if it is not. For all other cases (not in cache, being torn * down, etc.), return a negative error code. * * The caller has to prevent inode allocation and freeing activity, * presumably by locking the AGI buffer. This is to ensure that an * inode cannot transition from allocated to freed until the caller is * ready to allow that. If the inode is in an intermediate state (new, * reclaimable, or being reclaimed), -EAGAIN will be returned; if the * inode is not in the cache, -ENOENT will be returned. The caller must * deal with these scenarios appropriately. * * This is a specialized use case for the online scrubber; if you're * reading this, you probably want xfs_iget. */ int xfs_icache_inode_is_allocated( struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino, bool *inuse) { struct xfs_inode *ip; int error; error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip); if (error) return error; *inuse = !!(VFS_I(ip)->i_mode); xfs_irele(ip); return 0; }
/*
 * @mp:         mount containing the inode
 * @tp:         transaction to join the inode to (may be NULL: plain iget)
 * @ino:        inode number
 * @flags:      xfs_iget() lookup flags
 * @lock_flags: lock modes taken by xfs_iget(); recorded in the inode log
 *              item so the transaction knows what to unlock at commit
 * @ipp:        out: the inode, joined to @tp when @tp is non-NULL
 */
/* * Get an inode and join it to the transaction. */ int xfs_trans_iget( xfs_mount_t *mp, xfs_trans_t *tp, xfs_ino_t ino, uint flags, uint lock_flags, xfs_inode_t **ipp) { int error; error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp); if (!error && tp) { xfs_trans_ijoin(tp, *ipp); (*ipp)->i_itemp->ili_lock_flags = lock_flags; } return error; }
/*
 * NFS export operation: turn an (ino, generation) pair from a file handle
 * into a VFS inode (variant using XFS_IGET_UNTRUSTED with a shared ilock).
 *
 * EINVAL from the lookup (inode cluster gone) is translated to ESTALE
 * here rather than deeper in the stack, so bulkstat callers still see
 * EINVAL.  A generation mismatch releases the new reference and returns
 * -ENOENT.  Returns the VFS inode with a reference held, or an ERR_PTR.
 */
STATIC struct inode * xfs_nfs_get_inode( struct super_block *sb, u64 ino, u32 generation) { xfs_mount_t *mp = XFS_M(sb); xfs_inode_t *ip; int error; /* * NFS can sometimes send requests for ino 0. Fail them gracefully. */ if (ino == 0) return ERR_PTR(-ESTALE); /* * The XFS_IGET_UNTRUSTED means that an invalid inode number is just * fine and not an indication of a corrupted filesystem as clients can * send invalid file handles and we have to handle it gracefully.. */ error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, XFS_ILOCK_SHARED, &ip); if (error) { /* * EINVAL means the inode cluster doesn't exist anymore. * This implies the filehandle is stale, so we should * translate it here. * We don't use ESTALE directly down the chain to not * confuse applications using bulkstat that expect EINVAL. */ if (error == EINVAL) error = ESTALE; return ERR_PTR(-error); } if (ip->i_d.di_gen != generation) { xfs_iput_new(ip, XFS_ILOCK_SHARED); return ERR_PTR(-ENOENT); } xfs_iunlock(ip, XFS_ILOCK_SHARED); return VFS_I(ip); }
/*
 * Look up @dentry in directory @dp and return the target inode (later
 * revision of this routine, without the behavior-descriptor layer).
 *
 * The directory lock (@lock_mode, held by the caller) is dropped around
 * the xfs_iget() of the target to avoid a deadlock against inode
 * inactivation (see inline comment) and retaken before returning.  A
 * target with di_mode == 0 was freed under us: release it, ENOENT.
 *
 * Returns 0 with *inum/*ipp set, or an error with *ipp NULL.
 */
int xfs_dir_lookup_int( xfs_inode_t *dp, uint lock_mode, bhv_vname_t *dentry, xfs_ino_t *inum, xfs_inode_t **ipp) { int error; xfs_itrace_entry(dp); error = xfs_dir_lookup(NULL, dp, VNAME(dentry), VNAMELEN(dentry), inum); if (!error) { /* * Unlock the directory. We do this because we can't * hold the directory lock while doing the vn_get() * in xfs_iget(). Doing so could cause us to hold * a lock while waiting for the inode to finish * being inactive while it's waiting for a log * reservation in the inactive routine. */ xfs_iunlock(dp, lock_mode); error = xfs_iget(dp->i_mount, NULL, *inum, 0, 0, ipp, 0); xfs_ilock(dp, lock_mode); if (error) { *ipp = NULL; } else if ((*ipp)->i_d.di_mode == 0) { /* * The inode has been freed. Something is * wrong so just get out of here. */ xfs_iunlock(dp, lock_mode); xfs_iput_new(*ipp, 0); *ipp = NULL; xfs_ilock(dp, lock_mode); error = XFS_ERROR(ENOENT); } } return error; }
/*
 * Bulkstat callback for the internal quota self-check: charge one inode's
 * block and inode usage against its test dquots.
 *
 * Quota inodes themselves are skipped (EINVAL).  The inode is igot twice
 * on purpose: the first iget/iput cycle ("ipreleased" + goto again) pushes
 * the inode through inactive so blocks past EOF are freed before usage is
 * counted — see the inline comment.  *res carries the BULKSTAT_RV_* code
 * back to the bulkstat loop.
 */
STATIC int xfs_qm_internalqcheck_adjust( xfs_mount_t *mp, /* mount point for filesystem */ xfs_ino_t ino, /* inode number to get data for */ void __user *buffer, /* not used */ int ubsize, /* not used */ void *private_data, /* not used */ xfs_daddr_t bno, /* starting block of inode cluster */ int *ubused, /* not used */ void *dip, /* not used */ int *res) /* bulkstat result code */ { xfs_inode_t *ip; xfs_dqtest_t *ud, *gd; uint lock_flags; boolean_t ipreleased; int error; ASSERT(XFS_IS_QUOTA_RUNNING(mp)); if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) { *res = BULKSTAT_RV_NOTHING; qdprintk("internalqcheck: ino=%llu, uqino=%llu, gqino=%llu\n", (unsigned long long) ino, (unsigned long long) mp->m_sb.sb_uquotino, (unsigned long long) mp->m_sb.sb_gquotino); return XFS_ERROR(EINVAL); } ipreleased = B_FALSE; again: lock_flags = XFS_ILOCK_SHARED; if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) { *res = BULKSTAT_RV_NOTHING; return (error); } if (ip->i_d.di_mode == 0) { xfs_iput_new(ip, lock_flags); *res = BULKSTAT_RV_NOTHING; return XFS_ERROR(ENOENT); } /* * This inode can have blocks after eof which can get released * when we send it to inactive. Since we don't check the dquot * until the after all our calculations are done, we must get rid * of those now. */ if (! ipreleased) { xfs_iput(ip, lock_flags); ipreleased = B_TRUE; goto again; } xfs_qm_internalqcheck_get_dquots(mp, (xfs_dqid_t) ip->i_d.di_uid, (xfs_dqid_t) ip->i_d.di_projid, (xfs_dqid_t) ip->i_d.di_gid, &ud, &gd); if (XFS_IS_UQUOTA_ON(mp)) { ASSERT(ud); xfs_qm_internalqcheck_dqadjust(ip, ud); } if (XFS_IS_OQUOTA_ON(mp)) { ASSERT(gd); xfs_qm_internalqcheck_dqadjust(ip, gd); } xfs_iput(ip, lock_flags); *res = BULKSTAT_RV_DIDONE; return (0); }
/*
 * Two-quota-inode revision: user quota in sb_uquotino, group OR project
 * ("other") quota sharing sb_gquotino.  If the superblock lacks the quota
 * feature, XFS_QMOPT_SBVERSION plus the sbflags mask tell qino_alloc to
 * add it and log the affected superblock fields.  On any failure after
 * the user inode was obtained, that reference is dropped before
 * returning.  On success the inodes are parked in mp->m_quotainfo.
 */
/* * This is called after the superblock has been read in and we're ready to * iget the quota inodes. */ STATIC int xfs_qm_init_quotainos( xfs_mount_t *mp) { xfs_inode_t *uip, *gip; int error; __int64_t sbflags; uint flags; ASSERT(mp->m_quotainfo); uip = gip = NULL; sbflags = 0; flags = 0; /* * Get the uquota and gquota inodes */ if (xfs_sb_version_hasquota(&mp->m_sb)) { if (XFS_IS_UQUOTA_ON(mp) && mp->m_sb.sb_uquotino != NULLFSINO) { ASSERT(mp->m_sb.sb_uquotino > 0); if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &uip))) return XFS_ERROR(error); } if (XFS_IS_OQUOTA_ON(mp) && mp->m_sb.sb_gquotino != NULLFSINO) { ASSERT(mp->m_sb.sb_gquotino > 0); if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &gip))) { if (uip) IRELE(uip); return XFS_ERROR(error); } } } else { flags |= XFS_QMOPT_SBVERSION; sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | XFS_SB_QFLAGS); } /* * Create the two inodes, if they don't exist already. The changes * made above will get added to a transaction and logged in one of * the qino_alloc calls below. If the device is readonly, * temporarily switch to read-write to do this. */ if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { if ((error = xfs_qm_qino_alloc(mp, &uip, sbflags | XFS_SB_UQUOTINO, flags | XFS_QMOPT_UQUOTA))) return XFS_ERROR(error); flags &= ~XFS_QMOPT_SBVERSION; } if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) { flags |= (XFS_IS_GQUOTA_ON(mp) ? XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA); error = xfs_qm_qino_alloc(mp, &gip, sbflags | XFS_SB_GQUOTINO, flags); if (error) { if (uip) IRELE(uip); return XFS_ERROR(error); } } mp->m_quotainfo->qi_uquotaip = uip; mp->m_quotainfo->qi_gquotaip = gip; return 0; }
/*
 * Bulkstat callback for quotacheck: charge one inode's block and inode
 * usage to its user/group/project dquots.
 *
 * Quota inodes themselves are rejected (EINVAL).  The inode is taken with
 * ILOCK_EXCL only because xfs_qm_dqget expects it (see inline comment);
 * realtime blocks are counted separately via xfs_qm_get_rtblks so data
 * and rt usage land in the right dquot fields.  Adjustments go straight
 * to the incore dquots with no transaction: a crash mid-quotacheck
 * restarts the whole scan anyway.  *res carries BULKSTAT_RV_* back to the
 * bulkstat loop; error0 unwinds lock + reference.
 */
/* ARGSUSED */ STATIC int xfs_qm_dqusage_adjust( xfs_mount_t *mp, /* mount point for filesystem */ xfs_ino_t ino, /* inode number to get data for */ void __user *buffer, /* not used */ int ubsize, /* not used */ int *ubused, /* not used */ int *res) /* result code value */ { xfs_inode_t *ip; xfs_qcnt_t nblks, rtblks = 0; int error; ASSERT(XFS_IS_QUOTA_RUNNING(mp)); /* * rootino must have its resources accounted for, not so with the quota * inodes. */ if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) { *res = BULKSTAT_RV_NOTHING; return XFS_ERROR(EINVAL); } /* * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget * interface expects the inode to be exclusively locked because that's * the case in all other instances. It's OK that we do this because * quotacheck is done only at mount time. */ error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip); if (error) { *res = BULKSTAT_RV_NOTHING; return error; } ASSERT(ip->i_delayed_blks == 0); if (XFS_IS_REALTIME_INODE(ip)) { /* * Walk thru the extent list and count the realtime blocks. */ error = xfs_qm_get_rtblks(ip, &rtblks); if (error) goto error0; } nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks; /* * Add the (disk blocks and inode) resources occupied by this * inode to its dquots. We do this adjustment in the incore dquot, * and also copy the changes to its buffer. * We don't care about putting these changes in a transaction * envelope because if we crash in the middle of a 'quotacheck' * we have to start from the beginning anyway. * Once we're done, we'll log all the dquot bufs. * * The *QUOTA_ON checks below may look pretty racy, but quotachecks * and quotaoffs don't race. (Quotachecks happen at mount time only). 
 */ if (XFS_IS_UQUOTA_ON(mp)) { error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid, XFS_DQ_USER, nblks, rtblks); if (error) goto error0; } if (XFS_IS_GQUOTA_ON(mp)) { error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid, XFS_DQ_GROUP, nblks, rtblks); if (error) goto error0; } if (XFS_IS_PQUOTA_ON(mp)) { error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip), XFS_DQ_PROJ, nblks, rtblks); if (error) goto error0; } xfs_iunlock(ip, XFS_ILOCK_EXCL); IRELE(ip); *res = BULKSTAT_RV_DIDONE; return 0; error0: xfs_iunlock(ip, XFS_ILOCK_EXCL); IRELE(ip); *res = BULKSTAT_RV_GIVEUP; return error; }
/*
 * Fill @out with quota status for the Q_XGETQSTAT quotactl.
 *
 * When quota is running, the cached quota inodes in m_quotainfo are
 * used; otherwise they are igot temporarily (tempuqip/tempgqip track
 * which references must be dropped with IRELE before returning).  iget
 * failures here are deliberately swallowed: the corresponding qfs_nblks/
 * qfs_nextents fields are simply left zero.  Always returns 0.
 */
/* * Return quota status information, such as uquota-off, enforcements, etc. */ int xfs_qm_scall_getqstat( struct xfs_mount *mp, struct fs_quota_stat *out) { struct xfs_quotainfo *q = mp->m_quotainfo; struct xfs_inode *uip, *gip; boolean_t tempuqip, tempgqip; uip = gip = NULL; tempuqip = tempgqip = B_FALSE; memset(out, 0, sizeof(fs_quota_stat_t)); out->qs_version = FS_QSTAT_VERSION; if (!xfs_sb_version_hasquota(&mp->m_sb)) { out->qs_uquota.qfs_ino = NULLFSINO; out->qs_gquota.qfs_ino = NULLFSINO; return (0); } out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & (XFS_ALL_QUOTA_ACCT| XFS_ALL_QUOTA_ENFD)); out->qs_pad = 0; out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino; out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; if (q) { uip = q->qi_uquotaip; gip = q->qi_gquotaip; } if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &uip) == 0) tempuqip = B_TRUE; } if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &gip) == 0) tempgqip = B_TRUE; } if (uip) { out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks; out->qs_uquota.qfs_nextents = uip->i_d.di_nextents; if (tempuqip) IRELE(uip); } if (gip) { out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks; out->qs_gquota.qfs_nextents = gip->i_d.di_nextents; if (tempgqip) IRELE(gip); } if (q) { out->qs_incoredqs = q->qi_dquots; out->qs_btimelimit = q->qi_btimelimit; out->qs_itimelimit = q->qi_itimelimit; out->qs_rtbtimelimit = q->qi_rtbtimelimit; out->qs_bwarnlimit = q->qi_bwarnlimit; out->qs_iwarnlimit = q->qi_iwarnlimit; } return 0; }
/*
 * Old vnode-era revision.  Validation pipeline: capability check, parent
 * must be a directory, copy the user's fsop_handlereq, then copy and
 * sanity-check the variable-length handle itself (size bounds, zero-fill
 * of any tail not supplied, fid length/pad consistency) before trusting
 * its ino/gen.  The inode is igot ILOCK_SHARED, checked against the
 * handle's generation (and di_mode != 0, i.e. not freed), then unlocked
 * — the reference is what the caller keeps and must VN_RELE.
 */
/* * Convert userspace handle data into vnode (and inode). * We [ab]use the fact that all the fsop_handlereq ioctl calls * have a data structure argument whose first component is always * a xfs_fsop_handlereq_t, so we can cast to and from this type. * This allows us to optimise the copy_from_user calls and gives * a handy, shared routine. * * If no error, caller must always VN_RELE the returned vp. */ STATIC int xfs_vget_fsop_handlereq( xfs_mount_t *mp, struct inode *parinode, /* parent inode pointer */ int cap, /* capability level for op */ unsigned long arg, /* userspace data pointer */ unsigned long size, /* size of expected struct */ /* output arguments */ xfs_fsop_handlereq_t *hreq, vnode_t **vp, struct inode **inode) { void *hanp; size_t hlen; xfs_fid_t *xfid; xfs_handle_t *handlep; xfs_handle_t handle; xfs_inode_t *ip; struct inode *inodep; vnode_t *vpp; xfs_ino_t ino; __u32 igen; int error; if (!capable(cap)) return XFS_ERROR(EPERM); /* * Only allow handle opens under a directory. */ if (!S_ISDIR(parinode->i_mode)) return XFS_ERROR(ENOTDIR); /* * Copy the handle down from the user and validate * that it looks to be in the correct format. 
 */ if (copy_from_user(hreq, (struct xfs_fsop_handlereq *)arg, size)) return XFS_ERROR(EFAULT); hanp = hreq->ihandle; hlen = hreq->ihandlen; handlep = &handle; if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep)) return XFS_ERROR(EINVAL); if (copy_from_user(handlep, hanp, hlen)) return XFS_ERROR(EFAULT); if (hlen < sizeof(*handlep)) memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen); if (hlen > sizeof(handlep->ha_fsid)) { if (handlep->ha_fid.xfs_fid_len != (hlen - sizeof(handlep->ha_fsid) - sizeof(handlep->ha_fid.xfs_fid_len)) || handlep->ha_fid.xfs_fid_pad) return XFS_ERROR(EINVAL); } /* * Crack the handle, obtain the inode # & generation # */ xfid = (struct xfs_fid *)&handlep->ha_fid; if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) { ino = xfid->xfs_fid_ino; igen = xfid->xfs_fid_gen; } else { return XFS_ERROR(EINVAL); } /* * Get the XFS inode, building a vnode to go with it. */ error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); if (error) return error; if (ip == NULL) return XFS_ERROR(EIO); if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) { xfs_iput_new(ip, XFS_ILOCK_SHARED); return XFS_ERROR(ENOENT); } vpp = XFS_ITOV(ip); inodep = LINVFS_GET_IP(vpp); xfs_iunlock(ip, XFS_ILOCK_SHARED); *vp = vpp; *inode = inodep; return 0; }
/*
 * Three-quota-inode revision: user, group and project quota each have
 * their own superblock inode field.  Existing inodes are igot first; any
 * that are enabled but missing are created by xfs_qm_qino_alloc (with
 * XFS_QMOPT_SBVERSION on the first creation when the superblock lacks
 * the quota feature).  The single error_rele label drops whichever of
 * the three references were already taken — the joint unwind for all
 * failure paths.  On success the inodes are parked in mp->m_quotainfo.
 */
/* * This is called after the superblock has been read in and we're ready to * iget the quota inodes. */ STATIC int xfs_qm_init_quotainos( xfs_mount_t *mp) { struct xfs_inode *uip = NULL; struct xfs_inode *gip = NULL; struct xfs_inode *pip = NULL; int error; uint flags = 0; ASSERT(mp->m_quotainfo); /* * Get the uquota and gquota inodes */ if (xfs_sb_version_hasquota(&mp->m_sb)) { if (XFS_IS_UQUOTA_ON(mp) && mp->m_sb.sb_uquotino != NULLFSINO) { ASSERT(mp->m_sb.sb_uquotino > 0); error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &uip); if (error) return error; } if (XFS_IS_GQUOTA_ON(mp) && mp->m_sb.sb_gquotino != NULLFSINO) { ASSERT(mp->m_sb.sb_gquotino > 0); error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &gip); if (error) goto error_rele; } if (XFS_IS_PQUOTA_ON(mp) && mp->m_sb.sb_pquotino != NULLFSINO) { ASSERT(mp->m_sb.sb_pquotino > 0); error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino, 0, 0, &pip); if (error) goto error_rele; } } else { flags |= XFS_QMOPT_SBVERSION; } /* * Create the three inodes, if they don't exist already. The changes * made above will get added to a transaction and logged in one of * the qino_alloc calls below. If the device is readonly, * temporarily switch to read-write to do this. */ if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { error = xfs_qm_qino_alloc(mp, &uip, flags | XFS_QMOPT_UQUOTA); if (error) goto error_rele; flags &= ~XFS_QMOPT_SBVERSION; } if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) { error = xfs_qm_qino_alloc(mp, &gip, flags | XFS_QMOPT_GQUOTA); if (error) goto error_rele; flags &= ~XFS_QMOPT_SBVERSION; } if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) { error = xfs_qm_qino_alloc(mp, &pip, flags | XFS_QMOPT_PQUOTA); if (error) goto error_rele; } mp->m_quotainfo->qi_uquotaip = uip; mp->m_quotainfo->qi_gquotaip = gip; mp->m_quotainfo->qi_pquotaip = pip; return 0; error_rele: if (uip) IRELE(uip); if (gip) IRELE(gip); if (pip) IRELE(pip); return error; }
/*
 * Newer revision of the handle-to-inode converter: the caller has already
 * copied the fsop_handlereq, so this takes @hreq directly.  The variable-
 * length handle is copied from userspace, bounds-checked, zero-filled
 * past the supplied length, and its fid length/pad validated before the
 * embedded ino/gen are trusted.  The inode is igot ILOCK_SHARED, its
 * generation compared against the handle's, and the lock dropped before
 * returning — the caller keeps only the reference (and must iput it).
 */
/* * Convert userspace handle data into inode. * * We use the fact that all the fsop_handlereq ioctl calls have a data * structure argument whose first component is always a xfs_fsop_handlereq_t, * so we can pass that sub structure into this handy, shared routine. * * If no error, caller must always iput the returned inode. */ STATIC int xfs_vget_fsop_handlereq( xfs_mount_t *mp, struct inode *parinode, /* parent inode pointer */ xfs_fsop_handlereq_t *hreq, struct inode **inode) { void __user *hanp; size_t hlen; xfs_fid_t *xfid; xfs_handle_t *handlep; xfs_handle_t handle; xfs_inode_t *ip; xfs_ino_t ino; __u32 igen; int error; /* * Only allow handle opens under a directory. */ if (!S_ISDIR(parinode->i_mode)) return XFS_ERROR(ENOTDIR); hanp = hreq->ihandle; hlen = hreq->ihandlen; handlep = &handle; if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep)) return XFS_ERROR(EINVAL); if (copy_from_user(handlep, hanp, hlen)) return XFS_ERROR(EFAULT); if (hlen < sizeof(*handlep)) memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen); if (hlen > sizeof(handlep->ha_fsid)) { if (handlep->ha_fid.fid_len != (hlen - sizeof(handlep->ha_fsid) - sizeof(handlep->ha_fid.fid_len)) || handlep->ha_fid.fid_pad) return XFS_ERROR(EINVAL); } /* * Crack the handle, obtain the inode # & generation # */ xfid = (struct xfs_fid *)&handlep->ha_fid; if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) { ino = xfid->fid_ino; igen = xfid->fid_gen; } else { return XFS_ERROR(EINVAL); } /* * Get the XFS inode, building a Linux inode to go with it. */ error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); if (error) return error; if (ip == NULL) return XFS_ERROR(EIO); if (ip->i_d.di_gen != igen) { xfs_iput_new(ip, XFS_ILOCK_SHARED); return XFS_ERROR(ENOENT); } xfs_iunlock(ip, XFS_ILOCK_SHARED); *inode = VFS_I(ip); return 0; }
/*
 * Core of bulkstat for a single inode: iget it (UNTRUSTED since the inode
 * number may come from userspace, DONTCACHE so a scan doesn't pollute the
 * inode cache), snapshot its on-disk fields into a heap-allocated
 * xfs_bstat, then hand that to @formatter to copy into the user buffer.
 * The rdev/blksize/blocks fields depend on the inode's data-fork format
 * (device node vs local/uuid vs extents/btree — see the switch).  Errors
 * are negative here (-EINVAL internal inum, -ENOMEM, iget errors); *stat
 * reports BULKSTAT_RV_DIDONE only after the formatter succeeds.
 */
/* * Return stat information for one inode. * Return 0 if ok, else errno. */ int xfs_bulkstat_one_int( struct xfs_mount *mp, /* mount point for filesystem */ xfs_ino_t ino, /* inode to get data for */ void __user *buffer, /* buffer to place output in */ int ubsize, /* size of buffer */ bulkstat_one_fmt_pf formatter, /* formatter, copy to user */ int *ubused, /* bytes used by me */ int *stat) /* BULKSTAT_RV_... */ { struct xfs_icdinode *dic; /* dinode core info pointer */ struct xfs_inode *ip; /* incore inode pointer */ struct xfs_bstat *buf; /* return buffer */ int error = 0; /* error value */ *stat = BULKSTAT_RV_NOTHING; if (!buffer || xfs_internal_inum(mp, ino)) return -EINVAL; buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL); if (!buf) return -ENOMEM; error = xfs_iget(mp, NULL, ino, (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED), XFS_ILOCK_SHARED, &ip); if (error) goto out_free; ASSERT(ip != NULL); ASSERT(ip->i_imap.im_blkno != 0); dic = &ip->i_d; /* xfs_iget returns the following without needing * further change. 
 */ buf->bs_nlink = dic->di_nlink; buf->bs_projid_lo = dic->di_projid_lo; buf->bs_projid_hi = dic->di_projid_hi; buf->bs_ino = ino; buf->bs_mode = dic->di_mode; buf->bs_uid = dic->di_uid; buf->bs_gid = dic->di_gid; buf->bs_size = dic->di_size; buf->bs_atime.tv_sec = dic->di_atime.t_sec; buf->bs_atime.tv_nsec = dic->di_atime.t_nsec; buf->bs_mtime.tv_sec = dic->di_mtime.t_sec; buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec; buf->bs_ctime.tv_sec = dic->di_ctime.t_sec; buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec; buf->bs_xflags = xfs_ip2xflags(ip); buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog; buf->bs_extents = dic->di_nextents; buf->bs_gen = dic->di_gen; memset(buf->bs_pad, 0, sizeof(buf->bs_pad)); buf->bs_dmevmask = dic->di_dmevmask; buf->bs_dmstate = dic->di_dmstate; buf->bs_aextents = dic->di_anextents; buf->bs_forkoff = XFS_IFORK_BOFF(ip); switch (dic->di_format) { case XFS_DINODE_FMT_DEV: buf->bs_rdev = ip->i_df.if_u2.if_rdev; buf->bs_blksize = BLKDEV_IOSIZE; buf->bs_blocks = 0; break; case XFS_DINODE_FMT_LOCAL: case XFS_DINODE_FMT_UUID: buf->bs_rdev = 0; buf->bs_blksize = mp->m_sb.sb_blocksize; buf->bs_blocks = 0; break; case XFS_DINODE_FMT_EXTENTS: case XFS_DINODE_FMT_BTREE: buf->bs_rdev = 0; buf->bs_blksize = mp->m_sb.sb_blocksize; buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks; break; } xfs_iunlock(ip, XFS_ILOCK_SHARED); IRELE(ip); error = formatter(buffer, ubsize, ubused, buf); if (!error) *stat = BULKSTAT_RV_DIDONE; out_free: kmem_free(buf); return error; }
/*
 * Log-recovery path for a bmap-update intent (BUI).  The extent described
 * by the intent is validated hard before anything is replayed: a bad BUI
 * releases the item and returns -EIO rather than touching the filesystem.
 * The owning inode is igot under the recovery transaction and, if it has
 * no remaining link, flagged XFS_IRECOVERY so teardown is deferred.  The
 * deferred bmap op is then finished; a partially-completed UNMAP (count
 * still > 0) is re-queued as a fresh unmap intent.  err_inode is the
 * shared unwind: cancel the transaction and, if the inode was obtained,
 * unlock and release it.  NOTE(review): ip is NULL until the iget
 * succeeds, which is what makes the shared label safe for the pre-iget
 * failure too.
 */
/* * Process a bmap update intent item that was recovered from the log. * We need to update some inode's bmbt. */ int xfs_bui_recover( struct xfs_mount *mp, struct xfs_bui_log_item *buip, struct xfs_defer_ops *dfops) { int error = 0; unsigned int bui_type; struct xfs_map_extent *bmap; xfs_fsblock_t startblock_fsb; xfs_fsblock_t inode_fsb; xfs_filblks_t count; bool op_ok; struct xfs_bud_log_item *budp; enum xfs_bmap_intent_type type; int whichfork; xfs_exntst_t state; struct xfs_trans *tp; struct xfs_inode *ip = NULL; struct xfs_bmbt_irec irec; ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags)); /* Only one mapping operation per BUI... */ if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) { set_bit(XFS_BUI_RECOVERED, &buip->bui_flags); xfs_bui_release(buip); return -EIO; } /* * First check the validity of the extent described by the * BUI. If anything is bad, then toss the BUI. */ bmap = &buip->bui_format.bui_extents[0]; startblock_fsb = XFS_BB_TO_FSB(mp, XFS_FSB_TO_DADDR(mp, bmap->me_startblock)); inode_fsb = XFS_BB_TO_FSB(mp, XFS_FSB_TO_DADDR(mp, XFS_INO_TO_FSB(mp, bmap->me_owner))); switch (bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK) { case XFS_BMAP_MAP: case XFS_BMAP_UNMAP: op_ok = true; break; default: op_ok = false; break; } if (!op_ok || startblock_fsb == 0 || bmap->me_len == 0 || inode_fsb == 0 || startblock_fsb >= mp->m_sb.sb_dblocks || bmap->me_len >= mp->m_sb.sb_agblocks || inode_fsb >= mp->m_sb.sb_dblocks || (bmap->me_flags & ~XFS_BMAP_EXTENT_FLAGS)) { /* * This will pull the BUI from the AIL and * free the memory associated with it. */ set_bit(XFS_BUI_RECOVERED, &buip->bui_flags); xfs_bui_release(buip); return -EIO; } error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp); if (error) return error; budp = xfs_trans_get_bud(tp, buip); /* Grab the inode. 
 */ error = xfs_iget(mp, tp, bmap->me_owner, 0, XFS_ILOCK_EXCL, &ip); if (error) goto err_inode; if (VFS_I(ip)->i_nlink == 0) xfs_iflags_set(ip, XFS_IRECOVERY); /* Process deferred bmap item. */ state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ? XFS_EXT_UNWRITTEN : XFS_EXT_NORM; whichfork = (bmap->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ? XFS_ATTR_FORK : XFS_DATA_FORK; bui_type = bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK; switch (bui_type) { case XFS_BMAP_MAP: case XFS_BMAP_UNMAP: type = bui_type; break; default: error = -EFSCORRUPTED; goto err_inode; } xfs_trans_ijoin(tp, ip, 0); count = bmap->me_len; error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type, ip, whichfork, bmap->me_startoff, bmap->me_startblock, &count, state); if (error) goto err_inode; if (count > 0) { ASSERT(type == XFS_BMAP_UNMAP); irec.br_startblock = bmap->me_startblock; irec.br_blockcount = count; irec.br_startoff = bmap->me_startoff; irec.br_state = state; error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec); if (error) goto err_inode; } set_bit(XFS_BUI_RECOVERED, &buip->bui_flags); error = xfs_trans_commit(tp); xfs_iunlock(ip, XFS_ILOCK_EXCL); IRELE(ip); return error; err_inode: xfs_trans_cancel(tp); if (ip) { xfs_iunlock(ip, XFS_ILOCK_EXCL); IRELE(ip); } return error; }
/*
 * Fill @out with quota status for Q_XGETQSTATV (the versioned variant of
 * getqstat with a separate project-quota field).  Cached quota inodes
 * from m_quotainfo are preferred; missing ones are igot temporarily, with
 * temp?qip tracking which references to IRELE before returning.  iget
 * failures are deliberately swallowed — the matching qfs_nblks/nextents
 * stay zero.  Always returns 0.
 */
/* * Return quota status information, such as uquota-off, enforcements, etc. * for Q_XGETQSTATV command, to support separate project quota field. */ int xfs_qm_scall_getqstatv( struct xfs_mount *mp, struct fs_quota_statv *out) { struct xfs_quotainfo *q = mp->m_quotainfo; struct xfs_inode *uip = NULL; struct xfs_inode *gip = NULL; struct xfs_inode *pip = NULL; bool tempuqip = false; bool tempgqip = false; bool temppqip = false; if (!xfs_sb_version_hasquota(&mp->m_sb)) { out->qs_uquota.qfs_ino = NULLFSINO; out->qs_gquota.qfs_ino = NULLFSINO; out->qs_pquota.qfs_ino = NULLFSINO; return (0); } out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & (XFS_ALL_QUOTA_ACCT| XFS_ALL_QUOTA_ENFD)); out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino; out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; out->qs_pquota.qfs_ino = mp->m_sb.sb_pquotino; if (q) { uip = q->qi_uquotaip; gip = q->qi_gquotaip; pip = q->qi_pquotaip; } if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &uip) == 0) tempuqip = true; } if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &gip) == 0) tempgqip = true; } if (!pip && mp->m_sb.sb_pquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_pquotino, 0, 0, &pip) == 0) temppqip = true; } if (uip) { out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks; out->qs_uquota.qfs_nextents = uip->i_d.di_nextents; if (tempuqip) IRELE(uip); } if (gip) { out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks; out->qs_gquota.qfs_nextents = gip->i_d.di_nextents; if (tempgqip) IRELE(gip); } if (pip) { out->qs_pquota.qfs_nblks = pip->i_d.di_nblocks; out->qs_pquota.qfs_nextents = pip->i_d.di_nextents; if (temppqip) IRELE(pip); } if (q) { out->qs_incoredqs = q->qi_dquots; out->qs_btimelimit = q->qi_btimelimit; out->qs_itimelimit = q->qi_itimelimit; out->qs_rtbtimelimit = q->qi_rtbtimelimit; out->qs_bwarnlimit = q->qi_bwarnlimit; out->qs_iwarnlimit = q->qi_iwarnlimit; } return 0; }
/*
 * Allocate (or adopt) one quota inode and wire it into the superblock.
 *
 * On superblocks without a separate pquotino field, group and project
 * quota share one inode: if the "other" field already names an inode it
 * is adopted via xfs_iget() instead of allocating (need_alloc = false),
 * and both in-core fields are cleared so the transaction below re-logs
 * the right one.  Superblock updates happen under m_sb_lock; when
 * XFS_QMOPT_SBVERSION is set the quota feature bit is added and all
 * three quota-inode fields reset first.  A commit failure is only
 * expected after a forced shutdown (see the ASSERT).
 */
/* * Create an inode and return with a reference already taken, but unlocked * This is how we create quota inodes */ STATIC int xfs_qm_qino_alloc( xfs_mount_t *mp, xfs_inode_t **ip, uint flags) { xfs_trans_t *tp; int error; int committed; bool need_alloc = true; *ip = NULL; /* * With superblock that doesn't have separate pquotino, we * share an inode between gquota and pquota. If the on-disk * superblock has GQUOTA and the filesystem is now mounted * with PQUOTA, just use sb_gquotino for sb_pquotino and * vice-versa. */ if (!xfs_sb_version_has_pquotino(&mp->m_sb) && (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) { xfs_ino_t ino = NULLFSINO; if ((flags & XFS_QMOPT_PQUOTA) && (mp->m_sb.sb_gquotino != NULLFSINO)) { ino = mp->m_sb.sb_gquotino; ASSERT(mp->m_sb.sb_pquotino == NULLFSINO); } else if ((flags & XFS_QMOPT_GQUOTA) && (mp->m_sb.sb_pquotino != NULLFSINO)) { ino = mp->m_sb.sb_pquotino; ASSERT(mp->m_sb.sb_gquotino == NULLFSINO); } if (ino != NULLFSINO) { error = xfs_iget(mp, NULL, ino, 0, 0, ip); if (error) return error; mp->m_sb.sb_gquotino = NULLFSINO; mp->m_sb.sb_pquotino = NULLFSINO; need_alloc = false; } } tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE); error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create, XFS_QM_QINOCREATE_SPACE_RES(mp), 0); if (error) { xfs_trans_cancel(tp, 0); return error; } if (need_alloc) { error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed); if (error) { xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); return error; } } /* * Make the changes in the superblock, and log those too. * sbfields arg may contain fields other than *QUOTINO; * VERSIONNUM for example. 
 */ spin_lock(&mp->m_sb_lock); if (flags & XFS_QMOPT_SBVERSION) { ASSERT(!xfs_sb_version_hasquota(&mp->m_sb)); xfs_sb_version_addquota(&mp->m_sb); mp->m_sb.sb_uquotino = NULLFSINO; mp->m_sb.sb_gquotino = NULLFSINO; mp->m_sb.sb_pquotino = NULLFSINO; /* qflags will get updated fully _after_ quotacheck */ mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT; } if (flags & XFS_QMOPT_UQUOTA) mp->m_sb.sb_uquotino = (*ip)->i_ino; else if (flags & XFS_QMOPT_GQUOTA) mp->m_sb.sb_gquotino = (*ip)->i_ino; else mp->m_sb.sb_pquotino = (*ip)->i_ino; spin_unlock(&mp->m_sb_lock); xfs_log_sb(tp); error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); if (error) { ASSERT(XFS_FORCED_SHUTDOWN(mp)); xfs_alert(mp, "%s failed (error %d)!", __func__, error); } if (need_alloc) xfs_finish_inode_setup(*ip); return error; }
/*
 * Get and lock the inode for the caller if it is not already
 * locked within the given transaction. If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * For an inode to be locked in a transaction, the inode lock, as
 * opposed to the io lock, must be taken exclusively. This ensures
 * that the inode can be involved in only 1 transaction at a time.
 * Lock recursion is handled on the io lock, but only for lock modes
 * of equal or lesser strength. That is, you can recur on the io lock
 * held EXCL with a SHARED request but not vice versa. Also, if
 * the inode is already a part of the transaction then you cannot
 * go from not holding the io lock to having it EXCL or SHARED.
 *
 * Use the inode cache routine xfs_inode_incore() to find the inode
 * if it is already owned by this transaction.
 *
 * If we don't already own the inode, use xfs_iget() to get it.
 * Since the inode log item structure is embedded in the incore
 * inode structure and is initialized when the inode is brought
 * into memory, there is nothing to do with it here.
 *
 * If the given transaction pointer is NULL, just call xfs_iget().
 * This simplifies code which must handle both cases.
 */
int
xfs_trans_iget(
	xfs_mount_t	*mp,		/* mount the inode lives on */
	xfs_trans_t	*tp,		/* transaction, or NULL */
	xfs_ino_t	ino,		/* inode number to look up */
	uint		lock_flags,	/* XFS_ILOCK_* / XFS_IOLOCK_* */
	xfs_inode_t	**ipp)		/* out: locked, joined inode */
{
	int			error;
	xfs_inode_t		*ip;
	xfs_inode_log_item_t	*iip;

	/*
	 * If the transaction pointer is NULL, just call the normal
	 * xfs_iget().
	 */
	if (tp == NULL) {
		return (xfs_iget(mp, NULL, ino, lock_flags, ipp, 0));
	}

	/*
	 * If we find the inode in core with this transaction
	 * pointer in its i_transp field, then we know we already
	 * have it locked. In this case we just increment the lock
	 * recursion count and return the inode to the caller.
	 * Assert that the inode is already locked in the mode requested
	 * by the caller. We cannot do lock promotions yet, so
	 * die if someone gets this wrong.
	 */
	if ((ip = xfs_inode_incore(tp->t_mountp, ino, tp)) != NULL) {
		/*
		 * Make sure that the inode lock is held EXCL and
		 * that the io lock is never upgraded when the inode
		 * is already a part of the transaction.
		 */
		ASSERT(ip->i_itemp != NULL);
		ASSERT(lock_flags & XFS_ILOCK_EXCL);
		ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
		/* io lock EXCL request: must already hold it EXCL */
		ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
		       ismrlocked(&ip->i_iolock, MR_UPDATE));
		ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
		       (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_EXCL));
		/* io lock SHARED request: EXCL or SHARED already held is ok */
		ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
		       ismrlocked(&ip->i_iolock, (MR_UPDATE | MR_ACCESS)));
		ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
		       (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_ANY));

		/* bump recursion counts instead of re-taking the locks */
		if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
			ip->i_itemp->ili_iolock_recur++;
		}
		if (lock_flags & XFS_ILOCK_EXCL) {
			ip->i_itemp->ili_ilock_recur++;
		}
		*ipp = ip;
		return 0;
	}
	ASSERT(lock_flags & XFS_ILOCK_EXCL);
	error = xfs_iget(tp->t_mountp, tp, ino, lock_flags, &ip, 0);
	if (error) {
		return error;
	}
	ASSERT(ip != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	if (ip->i_itemp == NULL)
		xfs_inode_item_init(ip, mp);
	iip = ip->i_itemp;
	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)(iip));

	xfs_trans_inode_broot_debug(ip);

	/*
	 * If the IO lock has been acquired, mark that in
	 * the inode log item so we'll know to unlock it
	 * when the transaction commits.
	 */
	ASSERT(iip->ili_flags == 0);
	if (lock_flags & XFS_IOLOCK_EXCL) {
		iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED;
	}

	/*
	 * Initialize i_transp so we can find it with xfs_inode_incore()
	 * above.
	 */
	ip->i_transp = tp;

	*ipp = ip;
	return 0;
}
/*
 * Return quota status information for Q_XGETQSTAT: which quota types
 * are enabled/enforced, the quota inode numbers and their block/extent
 * usage, plus the global timers and warning limits.
 */
STATIC int
xfs_qm_scall_getqstat(
	xfs_mount_t	*mp,
	fs_quota_stat_t	*out)
{
	xfs_inode_t	*uqip = NULL;
	xfs_inode_t	*gqip = NULL;
	boolean_t	uqip_is_temp = B_FALSE;
	boolean_t	gqip_is_temp = B_FALSE;

	memset(out, 0, sizeof(fs_quota_stat_t));
	out->qs_version = FS_QSTAT_VERSION;

	/* Filesystem has no quota support at all: report null inodes. */
	if (!XFS_SB_VERSION_HASQUOTA(&mp->m_sb)) {
		out->qs_uquota.qfs_ino = NULLFSINO;
		out->qs_gquota.qfs_ino = NULLFSINO;
		return (0);
	}

	out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
			(XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD));
	out->qs_pad = 0;
	out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
	out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;

	/* Use the cached quota inodes when quotas are up and running. */
	if (mp->m_quotainfo) {
		uqip = mp->m_quotainfo->qi_uquotaip;
		gqip = mp->m_quotainfo->qi_gquotaip;
	}

	/* Otherwise take a temporary reference so usage can be read. */
	if (uqip == NULL && mp->m_sb.sb_uquotino != NULLFSINO) {
		if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
			     0, 0, &uqip, 0) == 0)
			uqip_is_temp = B_TRUE;
	}
	if (gqip == NULL && mp->m_sb.sb_gquotino != NULLFSINO) {
		if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
			     0, 0, &gqip, 0) == 0)
			gqip_is_temp = B_TRUE;
	}

	if (uqip != NULL) {
		out->qs_uquota.qfs_nblks = uqip->i_d.di_nblocks;
		out->qs_uquota.qfs_nextents = uqip->i_d.di_nextents;
		if (uqip_is_temp)
			VN_RELE(XFS_ITOV(uqip));
	}
	if (gqip != NULL) {
		out->qs_gquota.qfs_nblks = gqip->i_d.di_nblocks;
		out->qs_gquota.qfs_nextents = gqip->i_d.di_nextents;
		if (gqip_is_temp)
			VN_RELE(XFS_ITOV(gqip));
	}

	/* Timers/limits only exist once the quotainfo is initialized. */
	if (mp->m_quotainfo) {
		out->qs_incoredqs = XFS_QI_MPLNDQUOTS(mp);
		out->qs_btimelimit = XFS_QI_BTIMELIMIT(mp);
		out->qs_itimelimit = XFS_QI_ITIMELIMIT(mp);
		out->qs_rtbtimelimit = XFS_QI_RTBTIMELIMIT(mp);
		out->qs_bwarnlimit = XFS_QI_BWARNLIMIT(mp);
		out->qs_iwarnlimit = XFS_QI_IWARNLIMIT(mp);
	}
	return (0);
}
/* * Return quota status information, such as uquota-off, enforcements, etc. * for Q_XGETQSTAT command. */ int xfs_qm_scall_getqstat( struct xfs_mount *mp, struct fs_quota_stat *out) { struct xfs_quotainfo *q = mp->m_quotainfo; struct xfs_inode *uip = NULL; struct xfs_inode *gip = NULL; struct xfs_inode *pip = NULL; bool tempuqip = false; bool tempgqip = false; bool temppqip = false; memset(out, 0, sizeof(fs_quota_stat_t)); out->qs_version = FS_QSTAT_VERSION; if (!xfs_sb_version_hasquota(&mp->m_sb)) { out->qs_uquota.qfs_ino = NULLFSINO; out->qs_gquota.qfs_ino = NULLFSINO; return (0); } out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & (XFS_ALL_QUOTA_ACCT| XFS_ALL_QUOTA_ENFD)); if (q) { uip = q->qi_uquotaip; gip = q->qi_gquotaip; pip = q->qi_pquotaip; } if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &uip) == 0) tempuqip = true; } if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &gip) == 0) tempgqip = true; } /* * Q_XGETQSTAT doesn't have room for both group and project quotas. * So, allow the project quota values to be copied out only if * there is no group quota information available. 
*/ if (!gip) { if (!pip && mp->m_sb.sb_pquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_pquotino, 0, 0, &pip) == 0) temppqip = true; } } else pip = NULL; if (uip) { out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino; out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks; out->qs_uquota.qfs_nextents = uip->i_d.di_nextents; if (tempuqip) IRELE(uip); } if (gip) { out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks; out->qs_gquota.qfs_nextents = gip->i_d.di_nextents; if (tempgqip) IRELE(gip); } if (pip) { out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; out->qs_gquota.qfs_nblks = pip->i_d.di_nblocks; out->qs_gquota.qfs_nextents = pip->i_d.di_nextents; if (temppqip) IRELE(pip); } if (q) { out->qs_incoredqs = q->qi_dquots; out->qs_btimelimit = q->qi_btimelimit; out->qs_itimelimit = q->qi_itimelimit; out->qs_rtbtimelimit = q->qi_rtbtimelimit; out->qs_bwarnlimit = q->qi_bwarnlimit; out->qs_iwarnlimit = q->qi_iwarnlimit; } return 0; }