/*
 * Q_SETQUOTA - assign an entire dqblk structure.
 */
static int
_setquota(struct thread *td, struct mount *mp, u_long id, int type,
    struct dqblk64 *dqb)
{
	struct dquot *dq;
	struct dquot *ndq;
	struct ufsmount *ump;
	struct dqblk64 newlim;
	int error;

	error = priv_check(td, PRIV_VFS_SETQUOTA);
	if (error)
		return (error);

	newlim = *dqb;

	ndq = NODQUOT;
	ump = VFSTOUFS(mp);

	error = dqget(NULLVP, id, ump, type, &ndq);
	if (error)
		return (error);
	dq = ndq;
	DQI_LOCK(dq);
	DQI_WAIT(dq, PINOD+1, "setqta");
	/*
	 * Copy all but the current values.
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	newlim.dqb_curblocks = dq->dq_curblocks;
	newlim.dqb_curinodes = dq->dq_curinodes;
	if (dq->dq_id != 0) {
		newlim.dqb_btime = dq->dq_btime;
		newlim.dqb_itime = dq->dq_itime;
	}
	if (newlim.dqb_bsoftlimit &&
	    dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
		newlim.dqb_btime = time_second + ump->um_btime[type];
	if (newlim.dqb_isoftlimit &&
	    dq->dq_curinodes >= newlim.dqb_isoftlimit &&
	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
		newlim.dqb_itime = time_second + ump->um_itime[type];
	dq->dq_dqb = newlim;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	else
		dq->dq_flags &= ~DQ_FAKE;
	dq->dq_flags |= DQ_MOD;
	DQI_UNLOCK(dq);
	dqrele(NULLVP, dq);
	return (0);
}
/*
 * Release the quota fields from an inode.
 */
void
ufsquota_free(struct inode *ip)
{
	int i;

	for (i = 0; i < MAXQUOTAS; i++) {
		dqrele(ITOV(ip), ip->i_dquot[i]);
		ip->i_dquot[i] = NODQUOT;
	}
}
/*
 * Q_GETQUOTA - return current values in a dqblk structure.
 */
int
getquota(struct mount *mp, u_long id, int type, void *addr)
{
	struct dquot *dq;
	int error;

	if ((error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq)) != 0)
		return (error);
	error = copyout((void *)&dq->dq_dqb, addr, sizeof (struct dqblk));
	dqrele(NULLVP, dq);
	return (error);
}
/*
 * Q_SETQUOTA - assign an entire dqblk structure.
 */
int
setquota(struct mount *mp, u_long id, int type, void *addr)
{
	struct dquot *dq;
	struct dquot *ndq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dqblk newlim;
	int error;

	error = copyin(addr, (void *)&newlim, sizeof (struct dqblk));
	if (error)
		return (error);
	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	mutex_enter(&dq->dq_interlock);
	/*
	 * Copy all but the current values.
	 * Reset time limit if previously had no soft limit or were
	 * under it, but now have a soft limit and are over it.
	 */
	newlim.dqb_curblocks = dq->dq_curblocks;
	newlim.dqb_curinodes = dq->dq_curinodes;
	if (dq->dq_id != 0) {
		newlim.dqb_btime = dq->dq_btime;
		newlim.dqb_itime = dq->dq_itime;
	}
	if (newlim.dqb_bsoftlimit &&
	    dq->dq_curblocks >= newlim.dqb_bsoftlimit &&
	    (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit))
		newlim.dqb_btime = time_second + ump->um_btime[type];
	if (newlim.dqb_isoftlimit &&
	    dq->dq_curinodes >= newlim.dqb_isoftlimit &&
	    (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit))
		newlim.dqb_itime = time_second + ump->um_itime[type];
	dq->dq_dqb = newlim;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	else
		dq->dq_flags &= ~DQ_FAKE;
	dq->dq_flags |= DQ_MOD;
	mutex_exit(&dq->dq_interlock);
	dqrele(NULLVP, dq);
	return (0);
}
/*
 * Q_SETUSE - set current inode and block usage.
 */
static int
_setuse(struct thread *td, struct mount *mp, u_long id, int type,
    struct dqblk64 *dqb)
{
	struct dquot *dq;
	struct ufsmount *ump;
	struct dquot *ndq;
	struct dqblk64 usage;
	int error;

	error = priv_check(td, PRIV_UFS_SETUSE);
	if (error)
		return (error);

	usage = *dqb;

	ump = VFSTOUFS(mp);
	ndq = NODQUOT;

	error = dqget(NULLVP, id, ump, type, &ndq);
	if (error)
		return (error);
	dq = ndq;
	DQI_LOCK(dq);
	DQI_WAIT(dq, PINOD+1, "setuse");
	/*
	 * Reset time limit if have a soft limit and were
	 * previously under it, but are now over it.
	 */
	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
		dq->dq_btime = time_second + ump->um_btime[type];
	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
	    usage.dqb_curinodes >= dq->dq_isoftlimit)
		dq->dq_itime = time_second + ump->um_itime[type];
	dq->dq_curblocks = usage.dqb_curblocks;
	dq->dq_curinodes = usage.dqb_curinodes;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	dq->dq_flags |= DQ_MOD;
	DQI_UNLOCK(dq);
	dqrele(NULLVP, dq);
	return (0);
}
/*
 * Set up the quotas for an inode.
 *
 * This routine completely defines the semantics of quotas.
 * If other criteria are to be used to establish quotas, the
 * MAXQUOTAS value in quotas.h should be increased, and the
 * additional dquots set up here.
 */
int
getinoquota(struct inode *ip)
{
	struct ufsmount *ump = ip->i_ump;
	struct vnode *vp = ITOV(ip);
	int i, error;
	u_int32_t ino_ids[MAXQUOTAS];

	/*
	 * To avoid deadlocks never update quotas for quota files
	 * on the same file system
	 */
	for (i = 0; i < MAXQUOTAS; i++)
		if (ITOV(ip) == ump->um_quotas[i])
			return 0;

	ino_ids[USRQUOTA] = ip->i_uid;
	ino_ids[GRPQUOTA] = ip->i_gid;
	for (i = 0; i < MAXQUOTAS; i++) {
		/*
		 * If the file id changed the quota needs update.
		 */
		if (ip->i_dquot[i] != NODQUOT &&
		    ip->i_dquot[i]->dq_id != ino_ids[i]) {
			dqrele(ITOV(ip), ip->i_dquot[i]);
			ip->i_dquot[i] = NODQUOT;
		}
		/*
		 * Set up the quota based on file id.
		 * EINVAL means that quotas are not enabled.
		 */
		if (ip->i_dquot[i] == NODQUOT &&
		    (error = dqget(vp, ino_ids[i], ump, i, &ip->i_dquot[i])) &&
		    error != EINVAL)
			return (error);
	}
	return 0;
}
/*
 * Q_GETQUOTA - return current values in a dqblk structure.
 */
static int
_getquota(struct thread *td, struct mount *mp, u_long id, int type,
    struct dqblk64 *dqb)
{
	struct dquot *dq;
	int error;

	switch (type) {
	case USRQUOTA:
		if ((td->td_ucred->cr_uid != id) && !unprivileged_get_quota) {
			error = priv_check(td, PRIV_VFS_GETQUOTA);
			if (error)
				return (error);
		}
		break;

	case GRPQUOTA:
		if (!groupmember(id, td->td_ucred) &&
		    !unprivileged_get_quota) {
			error = priv_check(td, PRIV_VFS_GETQUOTA);
			if (error)
				return (error);
		}
		break;

	default:
		return (EINVAL);
	}

	dq = NODQUOT;
	error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq);
	if (error)
		return (error);
	*dqb = dq->dq_dqb;
	dqrele(NULLVP, dq);
	return (error);
}
/*
 * Q_SETUSE - set current inode and block usage.
 */
int
setuse(struct mount *mp, u_long id, int type, void *addr)
{
	struct dquot *dq;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *ndq;
	struct dqblk usage;
	int error;

	error = copyin(addr, (void *)&usage, sizeof (struct dqblk));
	if (error)
		return (error);
	if ((error = dqget(NULLVP, id, ump, type, &ndq)) != 0)
		return (error);
	dq = ndq;
	mutex_enter(&dq->dq_interlock);
	/*
	 * Reset time limit if have a soft limit and were
	 * previously under it, but are now over it.
	 */
	if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit &&
	    usage.dqb_curblocks >= dq->dq_bsoftlimit)
		dq->dq_btime = time_second + ump->um_btime[type];
	if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit &&
	    usage.dqb_curinodes >= dq->dq_isoftlimit)
		dq->dq_itime = time_second + ump->um_itime[type];
	dq->dq_curblocks = usage.dqb_curblocks;
	dq->dq_curinodes = usage.dqb_curinodes;
	if (dq->dq_curblocks < dq->dq_bsoftlimit)
		dq->dq_flags &= ~DQ_BLKS;
	if (dq->dq_curinodes < dq->dq_isoftlimit)
		dq->dq_flags &= ~DQ_INODS;
	dq->dq_flags |= DQ_MOD;
	mutex_exit(&dq->dq_interlock);
	dqrele(NULLVP, dq);
	return (0);
}
/*
 * Main code to turn off disk quotas for a filesystem. Does not change
 * flags.
 */
static int
quotaoff1(struct thread *td, struct mount *mp, int type)
{
	struct vnode *vp;
	struct vnode *qvp, *mvp;
	struct ufsmount *ump;
	struct dquot *dq;
	struct inode *ip;
	struct ucred *cr;
	int error;

	ump = VFSTOUFS(mp);

	UFS_LOCK(ump);
	KASSERT((ump->um_qflags[type] & QTF_CLOSING) != 0,
	    ("quotaoff1: flags are invalid"));
	if ((qvp = ump->um_quotas[type]) == NULLVP) {
		UFS_UNLOCK(ump);
		return (0);
	}
	cr = ump->um_cred[type];
	UFS_UNLOCK(ump);

	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to quota file being closed.
	 */
again:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto again;
		}
		ip = VTOI(vp);
		dq = ip->i_dquot[type];
		ip->i_dquot[type] = NODQUOT;
		dqrele(vp, dq);
		VOP_UNLOCK(vp, 0);
		vrele(vp);
	}

	error = dqflush(qvp);
	if (error != 0)
		return (error);

	/*
	 * Clear um_quotas before closing the quota vnode to prevent
	 * access to the closed vnode from dqget/dqsync
	 */
	UFS_LOCK(ump);
	ump->um_quotas[type] = NULLVP;
	ump->um_cred[type] = NOCRED;
	UFS_UNLOCK(ump);

	vn_lock(qvp, LK_EXCLUSIVE | LK_RETRY);
	qvp->v_vflag &= ~VV_SYSTEM;
	VOP_UNLOCK(qvp, 0);
	error = vn_close(qvp, FREAD|FWRITE, td->td_ucred, td);
	crfree(cr);

	return (error);
}
/*
 * Q_QUOTAON - set up a quota file for a particular filesystem.
 */
int
quotaon(struct thread *td, struct mount *mp, int type, void *fname)
{
	struct ufsmount *ump;
	struct vnode *vp, **vpp;
	struct vnode *mvp;
	struct dquot *dq;
	int error, flags;
	struct nameidata nd;

	error = priv_check(td, PRIV_UFS_QUOTAON);
	if (error != 0) {
		vfs_unbusy(mp);
		return (error);
	}

	if ((mp->mnt_flag & MNT_RDONLY) != 0) {
		vfs_unbusy(mp);
		return (EROFS);
	}

	ump = VFSTOUFS(mp);
	dq = NODQUOT;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, td);
	flags = FREAD | FWRITE;
	vfs_ref(mp);
	vfs_unbusy(mp);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0) {
		vfs_rel(mp);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;
	error = vfs_busy(mp, MBF_NOWAIT);
	vfs_rel(mp);
	if (error == 0) {
		if (vp->v_type != VREG) {
			error = EACCES;
			vfs_unbusy(mp);
		}
	}
	if (error != 0) {
		VOP_UNLOCK(vp, 0);
		(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
		return (error);
	}

	UFS_LOCK(ump);
	if ((ump->um_qflags[type] & (QTF_OPENING|QTF_CLOSING)) != 0) {
		UFS_UNLOCK(ump);
		VOP_UNLOCK(vp, 0);
		(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
		vfs_unbusy(mp);
		return (EALREADY);
	}
	ump->um_qflags[type] |= QTF_OPENING|QTF_CLOSING;
	UFS_UNLOCK(ump);
	if ((error = dqopen(vp, ump, type)) != 0) {
		VOP_UNLOCK(vp, 0);
		UFS_LOCK(ump);
		ump->um_qflags[type] &= ~(QTF_OPENING|QTF_CLOSING);
		UFS_UNLOCK(ump);
		(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
		vfs_unbusy(mp);
		return (error);
	}
	VOP_UNLOCK(vp, 0);
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_QUOTA;
	MNT_IUNLOCK(mp);

	vpp = &ump->um_quotas[type];
	if (*vpp != vp)
		quotaoff1(td, mp, type);

	/*
	 * When the directory vnode containing the quota file is
	 * inactivated, due to the shared lookup of the quota file
	 * vput()ing the dvp, the qsyncvp() call for the containing
	 * directory would try to acquire the quota lock exclusive.
	 * At the same time, lookup already locked the quota vnode
	 * shared.  Mark the quota vnode lock as allowing recursion
	 * and automatically converting shared locks to exclusive.
	 *
	 * Also mark quota vnode as system.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vp->v_vflag |= VV_SYSTEM;
	VN_LOCK_AREC(vp);
	VN_LOCK_DSHARE(vp);
	VOP_UNLOCK(vp, 0);
	*vpp = vp;

	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	ump->um_cred[type] = crhold(td->td_ucred);
	ump->um_btime[type] = MAX_DQ_TIME;
	ump->um_itime[type] = MAX_IQ_TIME;
	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->um_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->um_itime[type] = dq->dq_itime;
		dqrele(NULLVP, dq);
	}

	/*
	 * Allow the getdq from getinoquota below to read the quota
	 * from file.
	 */
	UFS_LOCK(ump);
	ump->um_qflags[type] &= ~QTF_CLOSING;
	UFS_UNLOCK(ump);

	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 */
again:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto again;
		}
		if (vp->v_type == VNON || vp->v_writecount == 0) {
			VOP_UNLOCK(vp, 0);
			vrele(vp);
			continue;
		}
		error = getinoquota(VTOI(vp));
		VOP_UNLOCK(vp, 0);
		vrele(vp);
		if (error) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			break;
		}
	}

	if (error)
		quotaoff_inchange(td, mp, type);

	UFS_LOCK(ump);
	ump->um_qflags[type] &= ~QTF_OPENING;
	KASSERT((ump->um_qflags[type] & QTF_CLOSING) == 0,
	    ("quotaon: leaking flags"));
	UFS_UNLOCK(ump);

	vfs_unbusy(mp);
	return (error);
}
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
static int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	uint8_t buf[sizeof(struct dqblk64)];
	off_t base, recsize;
	struct dquot *dq, *dq1;
	struct dqhash *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int dqvplocked, error;

#ifdef DEBUG_VFS_LOCKS
	if (vp != NULLVP)
		ASSERT_VOP_ELOCKED(vp, "dqget");
#endif

	if (vp != NULLVP && *dqp != NODQUOT) {
		return (0);
	}

	/* XXX: Disallow negative id values to prevent the
	 * creation of 100GB+ quota data files.
	 */
	if ((int)id < 0)
		return (EINVAL);

	UFS_LOCK(ump);
	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		*dqp = NODQUOT;
		UFS_UNLOCK(ump);
		return (EINVAL);
	}
	vref(dqvp);
	UFS_UNLOCK(ump);
	error = 0;
	dqvplocked = 0;

	/*
	 * Check the cache first.
	 */
	dqh = DQHASH(dqvp, id);
	DQH_LOCK();
	dq = dqhashfind(dqh, id, dqvp);
	if (dq != NULL) {
		DQH_UNLOCK();
hfound:		DQI_LOCK(dq);
		DQI_WAIT(dq, PINOD+1, "dqget");
		DQI_UNLOCK(dq);
		if (dq->dq_ump == NULL) {
			dqrele(vp, dq);
			dq = NODQUOT;
			error = EIO;
		}
		*dqp = dq;
		if (dqvplocked)
			vput(dqvp);
		else
			vrele(dqvp);
		return (error);
	}

	/*
	 * Quota vnode lock is before DQ_LOCK. Acquire dqvp lock there
	 * since new dq will appear on the hash chain DQ_LOCKed.
	 */
	if (vp != dqvp) {
		DQH_UNLOCK();
		vn_lock(dqvp, LK_SHARED | LK_RETRY);
		dqvplocked = 1;
		DQH_LOCK();
		/*
		 * Recheck the cache after sleep for quota vnode lock.
		 */
		dq = dqhashfind(dqh, id, dqvp);
		if (dq != NULL) {
			DQH_UNLOCK();
			goto hfound;
		}
	}

	/*
	 * Not in cache, allocate a new one or take it from the
	 * free list.
	 */
	if (TAILQ_FIRST(&dqfreelist) == NODQUOT &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;
	if (numdquot < desireddquot) {
		numdquot++;
		DQH_UNLOCK();
		dq1 = malloc(sizeof *dq1, M_DQUOT, M_WAITOK | M_ZERO);
		mtx_init(&dq1->dq_lock, "dqlock", NULL, MTX_DEF);
		DQH_LOCK();
		/*
		 * Recheck the cache after sleep for memory.
		 */
		dq = dqhashfind(dqh, id, dqvp);
		if (dq != NULL) {
			numdquot--;
			DQH_UNLOCK();
			mtx_destroy(&dq1->dq_lock);
			free(dq1, M_DQUOT);
			goto hfound;
		}
		dq = dq1;
	} else {
		if ((dq = TAILQ_FIRST(&dqfreelist)) == NULL) {
			DQH_UNLOCK();
			tablefull("dquot");
			*dqp = NODQUOT;
			if (dqvplocked)
				vput(dqvp);
			else
				vrele(dqvp);
			return (EUSERS);
		}
		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
			panic("dqget: free dquot isn't %p", dq);
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		if (dq->dq_ump != NULL)
			LIST_REMOVE(dq, dq_hash);
	}

	/*
	 * Dq is put into hash already locked to prevent parallel
	 * usage while it is being read from file.
	 */
	dq->dq_flags = DQ_LOCK;
	dq->dq_id = id;
	dq->dq_type = type;
	dq->dq_ump = ump;
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	DQREF(dq);
	DQH_UNLOCK();

	/*
	 * Read the requested quota record from the quota file, performing
	 * any necessary conversions.
	 */
	if (ump->um_qflags[type] & QTF_64BIT) {
		recsize = sizeof(struct dqblk64);
		base = sizeof(struct dqhdr64);
	} else {
		recsize = sizeof(struct dqblk32);
		base = 0;
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = buf;
	aiov.iov_len = recsize;
	auio.uio_resid = recsize;
	auio.uio_offset = base + id * recsize;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = (struct thread *)0;

	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	if (auio.uio_resid == recsize && error == 0) {
		bzero(&dq->dq_dqb, sizeof(dq->dq_dqb));
	} else {
		if (ump->um_qflags[type] & QTF_64BIT)
			dqb64_dq((struct dqblk64 *)buf, dq);
		else
			dqb32_dq((struct dqblk32 *)buf, dq);
	}
	if (dqvplocked)
		vput(dqvp);
	else
		vrele(dqvp);
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		DQH_LOCK();
		dq->dq_ump = NULL;
		LIST_REMOVE(dq, dq_hash);
		DQH_UNLOCK();
		DQI_LOCK(dq);
		if (dq->dq_flags & DQ_WANT)
			wakeup(dq);
		dq->dq_flags = 0;
		DQI_UNLOCK(dq);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	DQI_LOCK(dq);
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0) {
			dq->dq_btime = time_second + ump->um_btime[type];
			if (dq->dq_bsoftlimit &&
			    dq->dq_curblocks >= dq->dq_bsoftlimit)
				dq->dq_flags |= DQ_MOD;
		}
		if (dq->dq_itime == 0) {
			dq->dq_itime = time_second + ump->um_itime[type];
			if (dq->dq_isoftlimit &&
			    dq->dq_curinodes >= dq->dq_isoftlimit)
				dq->dq_flags |= DQ_MOD;
		}
	}
	DQI_WAKEUP(dq);
	DQI_UNLOCK(dq);
	*dqp = dq;
	return (0);
}
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
static int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	struct dquot *dq, *ndq;
	struct dqhashhead *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	/* Lock to see an up to date value for QTF_CLOSING. */
	mutex_enter(&dqlock);
	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		mutex_exit(&dqlock);
		*dqp = NODQUOT;
		return (EINVAL);
	}
	KASSERT(dqvp != vp);
	/*
	 * Check the cache first.
	 */
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		KASSERT(dq->dq_cnt > 0);
		dqref(dq);
		mutex_exit(&dqlock);
		*dqp = dq;
		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	mutex_exit(&dqlock);
	ndq = pool_cache_get(dquot_cache, PR_WAITOK);
	/*
	 * Initialize the contents of the dquot structure.
	 */
	memset((char *)ndq, 0, sizeof *ndq);
	ndq->dq_flags = 0;
	ndq->dq_id = id;
	ndq->dq_ump = ump;
	ndq->dq_type = type;
	mutex_init(&ndq->dq_interlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_enter(&dqlock);
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Another thread beat us allocating this dquot.
		 */
		KASSERT(dq->dq_cnt > 0);
		dqref(dq);
		mutex_exit(&dqlock);
		mutex_destroy(&ndq->dq_interlock);
		pool_cache_put(dquot_cache, ndq);
		*dqp = dq;
		return 0;
	}
	dq = ndq;
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	dqref(dq);
	mutex_enter(&dq->dq_interlock);
	mutex_exit(&dqlock);
	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		memset((void *)&dq->dq_dqb, 0, sizeof(struct dqblk));
	VOP_UNLOCK(dqvp, 0);
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		mutex_enter(&dqlock);
		LIST_REMOVE(dq, dq_hash);
		mutex_exit(&dqlock);
		mutex_exit(&dq->dq_interlock);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0)
			dq->dq_btime = time_second + ump->um_btime[type];
		if (dq->dq_itime == 0)
			dq->dq_itime = time_second + ump->um_itime[type];
	}
	mutex_exit(&dq->dq_interlock);
	*dqp = dq;
	return (0);
}
/*
 * Q_QUOTAOFF - turn off disk quotas for a filesystem.
 */
int
quotaoff(struct lwp *l, struct mount *mp, int type)
{
	struct vnode *vp;
	struct vnode *qvp, *mvp;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *dq;
	struct inode *ip;
	kauth_cred_t cred;
	int i, error;

	/* Allocate a marker vnode. */
	if ((mvp = vnalloc(mp)) == NULL)
		return ENOMEM;

	mutex_enter(&dqlock);
	while ((ump->um_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
		cv_wait(&dqcv, &dqlock);
	if ((qvp = ump->um_quotas[type]) == NULLVP) {
		mutex_exit(&dqlock);
		vnfree(mvp);
		return (0);
	}
	ump->um_qflags[type] |= QTF_CLOSING;
	mutex_exit(&dqlock);
	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to quota file being closed.
	 */
	mutex_enter(&mntvnode_lock);
again:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		mutex_enter(&vp->v_interlock);
		if (VTOI(vp) == NULL || vp->v_mount != mp || vismarker(vp) ||
		    vp->v_type == VNON ||
		    (vp->v_iflag & (VI_XLOCK | VI_CLEAN)) != 0) {
			mutex_exit(&vp->v_interlock);
			continue;
		}
		mutex_exit(&mntvnode_lock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			goto again;
		}
		ip = VTOI(vp);
		dq = ip->i_dquot[type];
		ip->i_dquot[type] = NODQUOT;
		dqrele(vp, dq);
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	/* Free the marker vnode allocated above; without this it leaks. */
	vnfree(mvp);
#ifdef DIAGNOSTIC
	dqflush(qvp);
#endif
	qvp->v_vflag &= ~VV_SYSTEM;
	error = vn_close(qvp, FREAD|FWRITE, l->l_cred);
	mutex_enter(&dqlock);
	ump->um_quotas[type] = NULLVP;
	cred = ump->um_cred[type];
	ump->um_cred[type] = NOCRED;
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	ump->um_qflags[type] &= ~QTF_CLOSING;
	cv_broadcast(&dqcv);
	mutex_exit(&dqlock);
	kauth_cred_free(cred);
	if (i == MAXQUOTAS)
		mp->mnt_flag &= ~MNT_QUOTA;
	return (error);
}
/*
 * Q_QUOTAON - set up a quota file for a particular file system.
 */
int
quotaon(struct lwp *l, struct mount *mp, int type, void *fname)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, **vpp, *mvp;
	struct dquot *dq;
	int error;
	struct nameidata nd;

	/* XXX XXX XXX */
	if (mp->mnt_wapbl != NULL) {
		printf("%s: quotas cannot yet be used with -o log\n",
		    mp->mnt_stat.f_mntonname);
		return (EOPNOTSUPP);
	}

	vpp = &ump->um_quotas[type];
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname);
	if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0)
		return (error);
	vp = nd.ni_vp;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VREG) {
		(void) vn_close(vp, FREAD|FWRITE, l->l_cred);
		return (EACCES);
	}
	if (*vpp != vp)
		quotaoff(l, mp, type);
	mutex_enter(&dqlock);
	while ((ump->um_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
		cv_wait(&dqcv, &dqlock);
	ump->um_qflags[type] |= QTF_OPENING;
	mutex_exit(&dqlock);
	mp->mnt_flag |= MNT_QUOTA;
	vp->v_vflag |= VV_SYSTEM;	/* XXXSMP */
	*vpp = vp;
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	kauth_cred_hold(l->l_cred);
	ump->um_cred[type] = l->l_cred;
	ump->um_btime[type] = MAX_DQ_TIME;
	ump->um_itime[type] = MAX_IQ_TIME;
	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->um_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->um_itime[type] = dq->dq_itime;
		dqrele(NULLVP, dq);
	}
	/* Allocate a marker vnode. */
	if ((mvp = vnalloc(mp)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 */
	mutex_enter(&mntvnode_lock);
again:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		mutex_enter(&vp->v_interlock);
		if (VTOI(vp) == NULL || vp->v_mount != mp || vismarker(vp) ||
		    vp->v_type == VNON || vp->v_writecount == 0 ||
		    (vp->v_iflag & (VI_XLOCK | VI_CLEAN)) != 0) {
			mutex_exit(&vp->v_interlock);
			continue;
		}
		mutex_exit(&mntvnode_lock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			goto again;
		}
		if ((error = getinoquota(VTOI(vp))) != 0) {
			vput(vp);
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			break;
		}
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);
 out:
	mutex_enter(&dqlock);
	ump->um_qflags[type] &= ~QTF_OPENING;
	cv_broadcast(&dqcv);
	mutex_exit(&dqlock);
	if (error)
		quotaoff(l, mp, type);
	return (error);
}
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	struct dquot *dq, *ndq;
	struct dqhashhead *dqh;
	struct vnode *dqvp;
	int error = 0;		/* XXX gcc */

	/* Lock to see an up to date value for QTF_CLOSING. */
	mutex_enter(&dqlock);
	if ((ump->um_flags & (UFS_QUOTA|UFS_QUOTA2)) == 0) {
		mutex_exit(&dqlock);
		*dqp = NODQUOT;
		return (ENODEV);
	}
	dqvp = ump->um_quotas[type];
#ifdef QUOTA
	if (ump->um_flags & UFS_QUOTA) {
		if (dqvp == NULLVP || (ump->umq1_qflags[type] & QTF_CLOSING)) {
			mutex_exit(&dqlock);
			*dqp = NODQUOT;
			return (ENODEV);
		}
	}
#endif
#ifdef QUOTA2
	if (ump->um_flags & UFS_QUOTA2) {
		if (dqvp == NULLVP) {
			mutex_exit(&dqlock);
			*dqp = NODQUOT;
			return (ENODEV);
		}
	}
#endif
	KASSERT(dqvp != vp);
	/*
	 * Check the cache first.
	 */
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		KASSERT(dq->dq_cnt > 0);
		dqref(dq);
		mutex_exit(&dqlock);
		*dqp = dq;
		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	mutex_exit(&dqlock);
	ndq = pool_cache_get(dquot_cache, PR_WAITOK);
	/*
	 * Initialize the contents of the dquot structure.
	 */
	memset((char *)ndq, 0, sizeof *ndq);
	ndq->dq_flags = 0;
	ndq->dq_id = id;
	ndq->dq_ump = ump;
	ndq->dq_type = type;
	mutex_init(&ndq->dq_interlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_enter(&dqlock);
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Another thread beat us allocating this dquot.
		 */
		KASSERT(dq->dq_cnt > 0);
		dqref(dq);
		mutex_exit(&dqlock);
		mutex_destroy(&ndq->dq_interlock);
		pool_cache_put(dquot_cache, ndq);
		*dqp = dq;
		return 0;
	}
	dq = ndq;
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	dqref(dq);
	mutex_enter(&dq->dq_interlock);
	mutex_exit(&dqlock);
#ifdef QUOTA
	if (ump->um_flags & UFS_QUOTA)
		error = dq1get(dqvp, id, ump, type, dq);
#endif
#ifdef QUOTA2
	if (ump->um_flags & UFS_QUOTA2)
		error = dq2get(dqvp, id, ump, type, dq);
#endif
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		mutex_enter(&dqlock);
		LIST_REMOVE(dq, dq_hash);
		mutex_exit(&dqlock);
		mutex_exit(&dq->dq_interlock);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	mutex_exit(&dq->dq_interlock);
	*dqp = dq;
	return (0);
}