/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Should always be called with the mount point locked.
 *
 * Returns 0 on success, or the last error seen while flushing vnodes,
 * the softdep worklist, the device vnode, or the superblock.
 */
int
ffs_sync(struct mount *mp, int waitfor, struct ucred *cred, struct proc *p)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, allerror = 0, count;
	struct ffs_sync_args fsa;

	fs = ump->um_fs;
	/*
	 * Write back modified superblock.
	 * Consistency check that the superblock
	 * is still in the buffer cache.
	 *
	 * A modified superblock on a read-only mount should be
	 * impossible; treat it as fatal corruption.
	 */
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {
		printf("fs = %s\n", fs->fs_fsmnt);
		panic("update: rofs mod");
	}
 loop:
	/*
	 * Write back each (modified) inode.
	 * Per-pass state for the vnode-walk callback.
	 */
	fsa.allerror = 0;
	fsa.p = p;
	fsa.cred = cred;
	fsa.waitfor = waitfor;

	/*
	 * Don't traverse the vnode list if we want to skip all of them.
	 */
	if (waitfor != MNT_LAZY) {
		vfs_mount_foreach_vnode(mp, ffs_sync_vnode, &fsa);
		allerror = fsa.allerror;
	}

	/*
	 * Force stale file system control information to be flushed.
	 */
	if ((ump->um_mountp->mnt_flag & MNT_SOFTDEP) && waitfor == MNT_WAIT) {
		if ((error = softdep_flushworklist(ump->um_mountp, &count, p)))
			allerror = error;
		/* Flushed work items may create new vnodes to clean */
		if (count)
			goto loop;
	}
	if (waitfor != MNT_LAZY) {
		/*
		 * With soft dependencies the worklist flush above already
		 * did the synchronous work, so the device fsync need not
		 * wait.
		 */
		if (ump->um_mountp->mnt_flag & MNT_SOFTDEP)
			waitfor = MNT_NOWAIT;
		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p);
		if ((error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p)) != 0)
			allerror = error;
		VOP_UNLOCK(ump->um_devvp, 0, p);
	}
	/* Push any dirty quota records for this mount. */
	qsync(mp);
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0 && (error = ffs_sbupdate(ump, waitfor)) != 0)
		allerror = error;
	return (allerror);
}
/*
 * Do operations associated with quotas.
 *
 * The quota command and quota type are packed together in "cmds";
 * "uid" selects the user (or -1 for the caller), and "arg" is the
 * command-specific user buffer.  Returns 0 or an errno value;
 * EOPNOTSUPP when the kernel is built without QUOTA support.
 */
int
ufs_quotactl(struct mount *mp, int cmds, uid_t uid, void *arg)
{
	struct lwp *l = curlwp;

#ifndef QUOTA
	/* Quota support not compiled in: consume arguments and decline. */
	(void) mp;
	(void) cmds;
	(void) uid;
	(void) arg;
	(void) l;

	return (EOPNOTSUPP);
#else
	int qcmd, qtype, err;

	/* A uid of -1 means "the calling user". */
	if (uid == -1)
		uid = kauth_cred_getuid(l->l_cred);

	qcmd = cmds >> SUBCMDSHIFT;

	/*
	 * Anyone may sync quotas, and anyone may query their own;
	 * every other operation requires superuser authorization.
	 */
	if (qcmd != Q_SYNC &&
	    !(qcmd == Q_GETQUOTA && uid == kauth_cred_getuid(l->l_cred))) {
		err = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, NULL);
		if (err != 0)
			return (err);
	}

	qtype = cmds & SUBCMDMASK;
	if ((u_int)qtype >= MAXQUOTAS)
		return (EINVAL);

	/* Pin the mount for the duration of the operation. */
	err = vfs_busy(mp, NULL);
	if (err != 0)
		return (err);

	mutex_enter(&mp->mnt_updating);
	switch (qcmd) {
	case Q_QUOTAON:
		err = quotaon(l, mp, qtype, arg);
		break;

	case Q_QUOTAOFF:
		err = quotaoff(l, mp, qtype);
		break;

	case Q_SETQUOTA:
		err = setquota(mp, uid, qtype, arg);
		break;

	case Q_SETUSE:
		err = setuse(mp, uid, qtype, arg);
		break;

	case Q_GETQUOTA:
		err = getquota(mp, uid, qtype, arg);
		break;

	case Q_SYNC:
		err = qsync(mp);
		break;

	default:
		err = EINVAL;
		break;
	}
	mutex_exit(&mp->mnt_updating);
	vfs_unbusy(mp, false, NULL);

	return (err);
#endif
}
/*
 * Sys call to allow users to find out
 * their current position wrt quota's
 * and to allow super users to alter it.
 *
 * Old-style syscall: arguments arrive via u.u_ap and the result is
 * reported through u.u_error rather than a return value.
 */
qquota()
{
	register struct a {
		int	cmd;
		int	uid;
		int	arg;
		caddr_t	addr;
	} *uap = (struct a *)u.u_ap;
	register struct quota *q;

#ifndef QUOTA
	u.u_error = EINVAL;
	return;
#else
	/* A negative uid means "the calling user's real uid". */
	if (uap->uid < 0)
		uap->uid = u.u_ruid;
	/*
	 * Operating on another user's quota requires superuser;
	 * suser() sets u.u_error itself on failure.
	 */
	if (uap->uid != u.u_ruid && uap->uid != u.u_quota->q_uid && !suser())
		return;
	/*
	 * Q_SYNC and Q_SETUID do not act on a per-user quota record,
	 * so only fetch (and reference) one for the other commands.
	 */
	if (uap->cmd != Q_SYNC && uap->cmd != Q_SETUID) {
		q = getquota((uid_t)uap->uid, uap->cmd == Q_DOWARN, 0);
		if (q == NOQUOTA) {
			u.u_error = ESRCH;
			return;
		}
		if (u.u_error)
			goto bad;
	}
	switch (uap->cmd) {

	case Q_SETDLIM:
		u.u_error = setdlim(q, (dev_t)uap->arg, uap->addr);
		break;

	case Q_GETDLIM:
		u.u_error = getdlim(q, (dev_t)uap->arg, uap->addr);
		break;

	case Q_SETDUSE:
		u.u_error = setduse(q, (dev_t)uap->arg, uap->addr);
		break;

	case Q_SETWARN:
		u.u_error = setwarn(q, (dev_t)uap->arg, uap->addr);
		break;

	case Q_DOWARN:
		u.u_error = dowarn(q, (dev_t)uap->arg);
		break;

	/*
	 * These two return directly instead of breaking: q was never
	 * fetched for them, so they must not reach the delquota() below.
	 */
	case Q_SYNC:
		u.u_error = qsync((dev_t)uap->arg);
		return;

	case Q_SETUID:
		u.u_error = qsetuid((uid_t)uap->uid, uap->arg);
		return;

	default:
		u.u_error = EINVAL;
		break;
	}
bad:
	/* Drop the reference taken by getquota() above. */
	delquota(q);
#endif
}
/*ARGSUSED*/
/*
 * quotactl ioctl handler: copy a struct quotctl in from userland
 * (handling ILP32 callers when _SYSCALL32_IMPL) and dispatch the
 * requested quota operation on the filesystem backing "vp".
 *
 * Returns 0 or an errno value (EFAULT on a bad user address,
 * EINVAL for an unknown operation).
 *
 * Fixes relative to the previous revision:
 *  - the copyin() calls had their second argument corrupted from
 *    "&quot" / "&quot32" into a stray string literal (HTML-entity
 *    mangling); restored to the addresses of the local structs.
 *  - ufsvfsp is now initialized to NULL so the Q_ALLSYNC and
 *    default paths never leave it holding an indeterminate value.
 */
int
quotactl(struct vnode *vp, intptr_t arg, int flag, struct cred *cr)
{
	struct quotctl quot;
	struct ufsvfs *ufsvfsp = NULL;
	int error = 0;

	if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
		if (copyin((caddr_t)arg, &quot, sizeof (struct quotctl)))
			return (EFAULT);
	}
#ifdef _SYSCALL32_IMPL
	else {
		/* quotctl struct from ILP32 callers */
		struct quotctl32 quot32;

		if (copyin((caddr_t)arg, &quot32, sizeof (struct quotctl32)))
			return (EFAULT);
		quot.op = quot32.op;
		quot.uid = quot32.uid;
		quot.addr = (caddr_t)(uintptr_t)quot32.addr;
	}
#endif /* _SYSCALL32_IMPL */

	/* A negative uid means "the calling user's real uid". */
	if (quot.uid < 0)
		quot.uid = crgetruid(cr);
	if (quot.op == Q_SYNC && vp == NULL) {
		/* Sync with no vnode: qsync() below acts on all mounts. */
		ufsvfsp = NULL;
	} else if (quot.op != Q_ALLSYNC) {
		/*
		 * NOTE(review): assumes callers pass a non-NULL vp for
		 * all per-filesystem operations — confirm against the
		 * syscall entry path.
		 */
		ufsvfsp = (struct ufsvfs *)(vp->v_vfsp->vfs_data);
	}
	switch (quot.op) {

	case Q_QUOTAON:
		/* Lazily initialize the global quota tables once. */
		rw_enter(&dq_rwlock, RW_WRITER);
		if (quotas_initialized == 0) {
			qtinit2();
			quotas_initialized = 1;
		}
		rw_exit(&dq_rwlock);
		error = opendq(ufsvfsp, vp, cr);
		break;

	case Q_QUOTAOFF:
		error = closedq(ufsvfsp, cr);
		if (!error) {
			invalidatedq(ufsvfsp);
		}
		break;

	case Q_SETQUOTA:
	case Q_SETQLIM:
		error = setquota(quot.op, (uid_t)quot.uid, ufsvfsp,
		    quot.addr, cr);
		break;

	case Q_GETQUOTA:
		error = getquota((uid_t)quot.uid, ufsvfsp,
		    (caddr_t)quot.addr, cr);
		break;

	case Q_SYNC:
		error = qsync(ufsvfsp);
		break;

	case Q_ALLSYNC:
		(void) qsync(NULL);
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}