Example No. 1
/*
 * Check for a valid change to a user's allocation.
 * Issue an error message if appropriate.
 */
static int
chkdqchg(struct inode *ip, ufs2_daddr_t change, struct ucred *cred,
         int type, int *warn)
{
    struct dquot *dq = ip->i_dquot[type];
    ufs2_daddr_t ncurblocks = dq->dq_curblocks + change;

    /*
     * If user would exceed their hard limit, disallow space allocation.
     */
    if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
        if ((dq->dq_flags & DQ_BLKS) == 0 &&
                ip->i_uid == cred->cr_uid) {
            dq->dq_flags |= DQ_BLKS;
            DQI_UNLOCK(dq);
            uprintf("\n%s: write failed, %s disk limit reached\n",
                    ITOVFS(ip)->mnt_stat.f_mntonname,
                    quotatypes[type]);
            return (EDQUOT);
        }
        DQI_UNLOCK(dq);
        return (EDQUOT);
    }
    /*
     * If user is over their soft limit for too long, disallow space
     * allocation. Reset time limit as they cross their soft limit.
     */
    if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
        if (dq->dq_curblocks < dq->dq_bsoftlimit) {
            dq->dq_btime = time_second + ITOUMP(ip)->um_btime[type];
            if (ip->i_uid == cred->cr_uid)
                *warn = 1;
            return (0);
        }
        if (time_second > dq->dq_btime) {
            if ((dq->dq_flags & DQ_BLKS) == 0 &&
                    ip->i_uid == cred->cr_uid) {
                dq->dq_flags |= DQ_BLKS;
                DQI_UNLOCK(dq);
                uprintf("\n%s: write failed, %s "
                        "disk quota exceeded for too long\n",
                        ITOVFS(ip)->mnt_stat.f_mntonname,
                        quotatypes[type]);
                return (EDQUOT);
            }
            DQI_UNLOCK(dq);
            return (EDQUOT);
        }
    }
    return (0);
}
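
chkdqchg() is the per-type helper driven by chkdq() (Example No. 4); the dq_curblocks, dq_bhardlimit, dq_bsoftlimit and dq_btime fields it reads are macro accessors over the limit record carried in the dquot. For orientation only, the sketch below shows the shape of that record as declared in sys/ufs/ufs/quota.h; the field names are the real ones, but the integer widths differ between the 32-bit and 64-bit quota formats, so treat the types shown here as an assumption.

#include <stdint.h>	/* fixed-width types used only by this sketch */

/*
 * Sketch of the per-id limit record behind the dq_* accessors used above
 * (see struct dqblk in sys/ufs/ufs/quota.h for the authoritative layout).
 */
struct dqblk_sketch {
	uint64_t dqb_bhardlimit;	/* absolute limit on disk blocks */
	uint64_t dqb_bsoftlimit;	/* preferred limit on disk blocks */
	uint64_t dqb_curblocks;		/* current block count */
	uint64_t dqb_ihardlimit;	/* maximum number of allocated inodes + 1 */
	uint64_t dqb_isoftlimit;	/* preferred inode limit */
	uint64_t dqb_curinodes;		/* current number of allocated inodes */
	int64_t  dqb_btime;		/* time limit for excessive disk use */
	int64_t  dqb_itime;		/* time limit for excessive files */
};
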
Example No. 2
/*
 * On filesystems with quotas enabled, it is an error for a file to change
 * size and not to have a dquot structure associated with it.
 */
static void
chkdquot(struct inode *ip)
{
    struct ufsmount *ump;
    struct vnode *vp;
    int i;

    ump = ITOUMP(ip);
    vp = ITOV(ip);

    /*
     * Disk quotas must be turned off for system files.  Currently
     * these are snapshots and quota files.
     */
    if ((vp->v_vflag & VV_SYSTEM) != 0)
        return;
    /*
     * XXX: Turn off quotas for files with a negative UID or GID.
     * This prevents the creation of 100GB+ quota files.
     */
    if ((int)ip->i_uid < 0 || (int)ip->i_gid < 0)
        return;

    UFS_LOCK(ump);
    for (i = 0; i < MAXQUOTAS; i++) {
        if (ump->um_quotas[i] == NULLVP ||
                (ump->um_qflags[i] & (QTF_OPENING|QTF_CLOSING)))
            continue;
        if (ip->i_dquot[i] == NODQUOT) {
            UFS_UNLOCK(ump);
            vn_printf(ITOV(ip), "chkdquot: missing dquot ");
            panic("chkdquot: missing dquot");
        }
    }
    UFS_UNLOCK(ump);
}
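
The loop above, like the chkdq()/chkiq() loops elsewhere in this file, walks the per-inode i_dquot[] array indexed by quota type. For reference, the quota-type constants and the human-readable names printed by uprintf() come from quota.h; the lines below are a sketch of those definitions (the values shown are the standard ones, but verify against the header you build with).

/* Quota-type indices used for i_dquot[], um_quotas[] and um_qflags[]
 * (sketch of the definitions in sys/ufs/ufs/quota.h). */
#define	USRQUOTA	0		/* element used for user quotas */
#define	GRPQUOTA	1		/* element used for group quotas */
#define	MAXQUOTAS	2		/* number of quota types */

/* quotatypes[] in ufs_quota.c is initialized from INITQFNAMES and supplies
 * the "user" / "group" strings seen in the messages above. */
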
Example No. 3
/*
 * Check the inode limit, applying corrective action.
 */
int
chkiq(struct inode *ip, int change, struct ucred *cred, int flags)
{
    struct dquot *dq;
    int i, error, warn, do_check;

#ifdef DIAGNOSTIC
    if ((flags & CHOWN) == 0)
        chkdquot(ip);
#endif
    if (change == 0)
        return (0);
    if (change < 0) {
        for (i = 0; i < MAXQUOTAS; i++) {
            if ((dq = ip->i_dquot[i]) == NODQUOT)
                continue;
            DQI_LOCK(dq);
            DQI_WAIT(dq, PINOD+1, "chkiq1");
            if (dq->dq_curinodes >= -change)
                dq->dq_curinodes += change;
            else
                dq->dq_curinodes = 0;
            dq->dq_flags &= ~DQ_INODS;
            dq->dq_flags |= DQ_MOD;
            DQI_UNLOCK(dq);
        }
        return (0);
    }
    if ((flags & FORCE) == 0 &&
            priv_check_cred(cred, PRIV_VFS_EXCEEDQUOTA, 0))
        do_check = 1;
    else
        do_check = 0;
    for (i = 0; i < MAXQUOTAS; i++) {
        if ((dq = ip->i_dquot[i]) == NODQUOT)
            continue;
        warn = 0;
        DQI_LOCK(dq);
        DQI_WAIT(dq, PINOD+1, "chkiq2");
        if (do_check) {
            error = chkiqchg(ip, change, cred, i, &warn);
            if (error) {
                /*
                 * Roll back user quota changes when
                 * group quota failed.
                 */
                while (i > 0) {
                    --i;
                    dq = ip->i_dquot[i];
                    if (dq == NODQUOT)
                        continue;
                    DQI_LOCK(dq);
                    DQI_WAIT(dq, PINOD+1, "chkiq3");
                    if (dq->dq_curinodes >= change)
                        dq->dq_curinodes -= change;
                    else
                        dq->dq_curinodes = 0;
                    dq->dq_flags &= ~DQ_INODS;
                    dq->dq_flags |= DQ_MOD;
                    DQI_UNLOCK(dq);
                }
                return (error);
            }
        }
        /* Reset timer when crossing soft limit */
        if (dq->dq_curinodes + change >= dq->dq_isoftlimit &&
                dq->dq_curinodes < dq->dq_isoftlimit)
            dq->dq_itime = time_second + ITOUMP(ip)->um_itime[i];
        dq->dq_curinodes += change;
        dq->dq_flags |= DQ_MOD;
        DQI_UNLOCK(dq);
        if (warn)
            uprintf("\n%s: warning, %s inode quota exceeded\n",
                    ITOVFS(ip)->mnt_stat.f_mntonname,
                    quotatypes[i]);
    }
    return (0);
}
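
Below is a minimal sketch of how a caller charges a freshly allocated inode against its quotas, in the style of ufs_makeinode(). The helper name charge_new_inode() is invented for illustration; getinoquota() and chkiq() are the real entry points used above, and error handling (freeing the inode again on EDQUOT) is omitted.

/*
 * Hypothetical helper: attach the dquots to a new inode and charge one
 * inode to each active quota.  A real caller would release the inode
 * again if this returns an error such as EDQUOT.
 */
static int
charge_new_inode(struct inode *ip, struct ucred *cred)
{
	int error;

	error = getinoquota(ip);		/* bind ip->i_dquot[] */
	if (error == 0)
		error = chkiq(ip, 1, cred, 0);	/* +1 inode, no FORCE/CHOWN */
	return (error);
}
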
Example No. 4
/*
 * Update disk usage, and take corrective action.
 */
int
chkdq(struct inode *ip, ufs2_daddr_t change, struct ucred *cred, int flags)
{
    struct dquot *dq;
    ufs2_daddr_t ncurblocks;
    struct vnode *vp = ITOV(ip);
    int i, error, warn, do_check;

    /*
     * Disk quotas must be turned off for system files.  Currently
     * these are snapshots and quota files.
     */
    if ((vp->v_vflag & VV_SYSTEM) != 0)
        return (0);
    /*
     * XXX: Turn off quotas for files with a negative UID or GID.
     * This prevents the creation of 100GB+ quota files.
     */
    if ((int)ip->i_uid < 0 || (int)ip->i_gid < 0)
        return (0);
#ifdef DIAGNOSTIC
    if ((flags & CHOWN) == 0)
        chkdquot(ip);
#endif
    if (change == 0)
        return (0);
    if (change < 0) {
        for (i = 0; i < MAXQUOTAS; i++) {
            if ((dq = ip->i_dquot[i]) == NODQUOT)
                continue;
            DQI_LOCK(dq);
            DQI_WAIT(dq, PINOD+1, "chkdq1");
            ncurblocks = dq->dq_curblocks + change;
            if (ncurblocks >= 0)
                dq->dq_curblocks = ncurblocks;
            else
                dq->dq_curblocks = 0;
            dq->dq_flags &= ~DQ_BLKS;
            dq->dq_flags |= DQ_MOD;
            DQI_UNLOCK(dq);
        }
        return (0);
    }
    if ((flags & FORCE) == 0 &&
            priv_check_cred(cred, PRIV_VFS_EXCEEDQUOTA, 0))
        do_check = 1;
    else
        do_check = 0;
    for (i = 0; i < MAXQUOTAS; i++) {
        if ((dq = ip->i_dquot[i]) == NODQUOT)
            continue;
        warn = 0;
        DQI_LOCK(dq);
        DQI_WAIT(dq, PINOD+1, "chkdq2");
        if (do_check) {
            error = chkdqchg(ip, change, cred, i, &warn);
            if (error) {
                /*
                 * Roll back user quota changes when
                 * group quota failed.
                 */
                while (i > 0) {
                    --i;
                    dq = ip->i_dquot[i];
                    if (dq == NODQUOT)
                        continue;
                    DQI_LOCK(dq);
                    DQI_WAIT(dq, PINOD+1, "chkdq3");
                    ncurblocks = dq->dq_curblocks - change;
                    if (ncurblocks >= 0)
                        dq->dq_curblocks = ncurblocks;
                    else
                        dq->dq_curblocks = 0;
                    dq->dq_flags &= ~DQ_BLKS;
                    dq->dq_flags |= DQ_MOD;
                    DQI_UNLOCK(dq);
                }
                return (error);
            }
        }
        /* Reset timer when crossing soft limit */
        if (dq->dq_curblocks + change >= dq->dq_bsoftlimit &&
                dq->dq_curblocks < dq->dq_bsoftlimit)
            dq->dq_btime = time_second + ITOUMP(ip)->um_btime[i];
        dq->dq_curblocks += change;
        dq->dq_flags |= DQ_MOD;
        DQI_UNLOCK(dq);
        if (warn)
            uprintf("\n%s: warning, %s disk quota exceeded\n",
                    ITOVFS(ip)->mnt_stat.f_mntonname,
                    quotatypes[i]);
    }
    return (0);
}
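
The block-count side follows a charge/refund pattern in the FFS allocator: space is charged with a positive change before the allocation is attempted, and handed back with a negative change plus FORCE if the allocation fails, so the refund itself can never be refused. The helpers below are an illustrative sketch of that pattern (the wrapper names are invented; chkdq(), btodb() and FORCE are the real interfaces), roughly mirroring what ffs_alloc() does under #ifdef QUOTA.

/* Charge 'size' bytes of disk space to the file's quotas before allocating. */
static int
charge_blocks(struct inode *ip, int size, struct ucred *cred)
{
	return (chkdq(ip, btodb(size), cred, 0));
}

/* Refund the charge after a failed allocation; FORCE makes it unconditional. */
static void
refund_blocks(struct inode *ip, int size, struct ucred *cred)
{
	(void)chkdq(ip, -btodb(size), cred, FORCE);
}
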
Example No. 5
/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  The IN_LAZYACCESS flag is set instead of IN_MODIFIED if
 * the filesystem is currently being suspended (or is suspended) and the vnode
 * has been accessed.
 * If we write now, then clear IN_MODIFIED, IN_LAZYACCESS and IN_LAZYMOD to
 * reflect the presumably successful write, and if waitfor is set, then wait
 * for the write to complete.
 */
int 
ffs_update(vnode *vp, int waitfor)
{
	int error = 0;
	print("HARVEY TODO: %s\n", __func__);
#if 0
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int flags, error;

	ASSERT_VOP_ELOCKED(vp, "ffs_update");
	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
	fs = ITOFS(ip);
	if (fs->fs_ronly && ITOUMP(ip)->um_fsckpid == 0)
		return (0);
	/*
	 * If we are updating a snapshot and another process is currently
	 * writing the buffer containing the inode for this snapshot then
	 * a deadlock can occur when it tries to check the snapshot to see
	 * if that block needs to be copied. Thus when updating a snapshot
	 * we check to see if the buffer is already locked, and if it is
	 * we drop the snapshot lock until the buffer has been written
	 * and is available to us. We have to grab a reference to the
	 * snapshot vnode to prevent it from being removed while we are
	 * waiting for the buffer.
	 */
	flags = 0;
	if (IS_SNAPSHOT(ip))
		flags = GB_LOCK_NOWAIT;
loop:
	error = breadn_flags(ITODEVVP(ip),
	     fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	     (int) fs->fs_bsize, 0, 0, 0, NOCRED, flags, &bp);
	if (error != 0) {
		if (error != EBUSY)
			return (error);
		KASSERT((IS_SNAPSHOT(ip)), ("EBUSY from non-snapshot"));
		/*
		 * Wait for our inode block to become available.
		 *
		 * Hold a reference to the vnode to protect against
		 * ffs_snapgone(). Since we hold a reference, it can only
		 * get reclaimed (VI_DOOMED flag) in a forcible downgrade
		 * or unmount. For an unmount, the entire filesystem will be
		 * gone, so we cannot attempt to touch anything associated
		 * with it while the vnode is unlocked; all we can do is 
		 * pause briefly and try again. If when we relock the vnode
		 * we discover that it has been reclaimed, updating it is no
		 * longer necessary and we can just return an error.
		 */
		vref(vp);
		VOP_UNLOCK(vp, 0);
		pause("ffsupd", 1);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vrele(vp);
		if ((vp->v_iflag & VI_DOOMED) != 0)
			return (ENOENT);
		goto loop;
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	if (I_IS_UFS1(ip)) {
		*((struct ufs1_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
		/* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */
		random_harvest_queue(&(ip->i_din1), sizeof(ip->i_din1), 1, RANDOM_FS_ATIME);
	} else {
		*((struct ufs2_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
		/* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */
		random_harvest_queue(&(ip->i_din2), sizeof(ip->i_din2), 1, RANDOM_FS_ATIME);
	}
	if (waitfor)
		error = bwrite(bp);
	else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		bawrite(bp);
		error = 0;
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		error = 0;
	}
#endif // 0
	return (error);
}
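
As a minimal sketch of how callers typically drive ffs_update(): mark the inode's timestamp flags dirty and request a synchronous write, as fsync-style paths do. The helper name is invented; the flags and the call itself match the stock FreeBSD declarations (the Harvey port above typedefs vnode, so drop the struct keyword in that environment).

/*
 * Illustrative caller: refresh the change/update timestamps and push the
 * inode to disk, waiting for the write to complete.
 */
static int
flush_inode_now(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);

	ip->i_flag |= IN_CHANGE | IN_UPDATE;	/* timestamps need updating */
	return (ffs_update(vp, 1));		/* waitfor != 0: synchronous */
}
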
Example No. 6
/*
 * Balloc defines the structure of filesystem storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 * This is the allocation strategy for UFS1. Below is
 * the allocation strategy for UFS2.
 */
int
ffs_balloc_ufs1(struct vnode *vp, off_t startoffset, int size,
    struct ucred *cred, int flags, struct buf **bpp)
{
	struct inode *ip;
	struct ufs1_dinode *dp;
	ufs_lbn_t lbn, lastlbn;
	struct fs *fs;
	ufs1_daddr_t nb;
	struct buf *bp, *nbp;
	struct ufsmount *ump;
	struct indir indirs[NIADDR + 2];
	int deallocated, osize, nsize, num, i, error;
	ufs2_daddr_t newb;
	ufs1_daddr_t *bap, pref;
	ufs1_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
	ufs2_daddr_t *lbns_remfree, lbns[NIADDR + 1];
	int unwindidx = -1;
	int saved_inbdflush;
	static struct timeval lastfail;
	static int curfail;
	int gbflags, reclaimed;

	ip = VTOI(vp);
	dp = ip->i_din1;
	fs = ITOFS(ip);
	ump = ITOUMP(ip);
	lbn = lblkno(fs, startoffset);
	size = blkoff(fs, startoffset) + size;
	reclaimed = 0;
	if (size > fs->fs_bsize)
		panic("ffs_balloc_ufs1: blk too big");
	*bpp = NULL;
	if (flags & IO_EXT)
		return (EOPNOTSUPP);
	if (lbn < 0)
		return (EFBIG);
	gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;

	if (DOINGSOFTDEP(vp))
		softdep_prealloc(vp, MNT_WAIT);
	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment,
	 * this fragment has to be extended to be a full block.
	 */
	lastlbn = lblkno(fs, ip->i_size);
	if (lastlbn < NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			UFS_LOCK(ump);
			error = ffs_realloccg(ip, nb, dp->di_db[nb],
			   ffs_blkpref_ufs1(ip, lastlbn, (int)nb,
			   &dp->di_db[0]), osize, (int)fs->fs_bsize, flags,
			   cred, &bp);
			if (error)
				return (error);
			if (DOINGSOFTDEP(vp))
				softdep_setup_allocdirect(ip, nb,
				    dbtofsb(fs, bp->b_blkno), dp->di_db[nb],
				    fs->fs_bsize, osize, bp);
			ip->i_size = smalllblktosize(fs, nb + 1);
			dp->di_size = ip->i_size;
			dp->di_db[nb] = dbtofsb(fs, bp->b_blkno);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (flags & IO_SYNC)
				bwrite(bp);
			else if (DOINGASYNC(vp))
				bdwrite(bp);
			else
				bawrite(bp);
		}
	}
	/*
	 * The first NDADDR blocks are direct blocks
	 */
	if (lbn < NDADDR) {
		if (flags & BA_METAONLY)
			panic("ffs_balloc_ufs1: BA_METAONLY for direct block");
		nb = dp->di_db[lbn];
		if (nb != 0 && ip->i_size >= smalllblktosize(fs, lbn + 1)) {
			error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
			bp->b_blkno = fsbtodb(fs, nb);
			*bpp = bp;
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {
				error = bread(vp, lbn, osize, NOCRED, &bp);
				if (error) {
					brelse(bp);
					return (error);
				}
				bp->b_blkno = fsbtodb(fs, nb);
			} else {
				UFS_LOCK(ump);
				error = ffs_realloccg(ip, lbn, dp->di_db[lbn],
				    ffs_blkpref_ufs1(ip, lbn, (int)lbn,
				    &dp->di_db[0]), osize, nsize, flags,
				    cred, &bp);
				if (error)
					return (error);
				if (DOINGSOFTDEP(vp))
					softdep_setup_allocdirect(ip, lbn,
					    dbtofsb(fs, bp->b_blkno), nb,
					    nsize, osize, bp);
			}
		} else {
			if (ip->i_size < smalllblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			UFS_LOCK(ump);
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref_ufs1(ip, lbn, (int)lbn, &dp->di_db[0]),
			    nsize, flags, cred, &newb);
			if (error)
				return (error);
			bp = getblk(vp, lbn, nsize, 0, 0, gbflags);
			bp->b_blkno = fsbtodb(fs, newb);
			if (flags & BA_CLRBUF)
				vfs_bio_clrbuf(bp);
			if (DOINGSOFTDEP(vp))
				softdep_setup_allocdirect(ip, lbn, newb, 0,
				    nsize, 0, bp);
		}
		dp->di_db[lbn] = dbtofsb(fs, bp->b_blkno);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bpp = bp;
		return (0);
	}
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if ((error = ufs_getlbns(vp, lbn, indirs, &num)) != 0)
		return(error);
#ifdef INVARIANTS
	if (num < 1)
		panic ("ffs_balloc_ufs1: ufs_getlbns returned indirect block");
#endif
	saved_inbdflush = curthread_pflags_set(TDP_INBDFLUSH);
	/*
	 * Fetch the first indirect block allocating if necessary.
	 */
	--num;
	nb = dp->di_ib[indirs[0].in_off];
	allocib = NULL;
	allocblk = allociblk;
	lbns_remfree = lbns;
	if (nb == 0) {
		UFS_LOCK(ump);
		pref = ffs_blkpref_ufs1(ip, lbn, -indirs[0].in_off - 1,
		    (ufs1_daddr_t *)0);
		if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    flags, cred, &newb)) != 0) {
			curthread_pflags_restore(saved_inbdflush);
			return (error);
		}
		pref = newb + fs->fs_frag;
		nb = newb;
		MPASS(allocblk < allociblk + nitems(allociblk));
		MPASS(lbns_remfree < lbns + nitems(lbns));
		*allocblk++ = nb;
		*lbns_remfree++ = indirs[1].in_lbn;
		bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, gbflags);
		bp->b_blkno = fsbtodb(fs, nb);
		vfs_bio_clrbuf(bp);
		if (DOINGSOFTDEP(vp)) {
			softdep_setup_allocdirect(ip, NDADDR + indirs[0].in_off,
			    newb, 0, fs->fs_bsize, 0, bp);
			bdwrite(bp);
		} else if ((flags & IO_SYNC) == 0 && DOINGASYNC(vp)) {
			if (bp->b_bufsize == fs->fs_bsize)
				bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		} else {
			if ((error = bwrite(bp)) != 0)
				goto fail;
		}
		allocib = &dp->di_ib[indirs[0].in_off];
		*allocib = nb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
retry:
	for (i = 1;;) {
		error = bread(vp,
		    indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			goto fail;
		}
		bap = (ufs1_daddr_t *)bp->b_data;
		nb = bap[indirs[i].in_off];
		if (i == num)
			break;
		i += 1;
		if (nb != 0) {
			bqrelse(bp);
			continue;
		}
		UFS_LOCK(ump);
		/*
		 * If parent indirect has just been allocated, try to cluster
		 * immediately following it.
		 */
		if (pref == 0)
			pref = ffs_blkpref_ufs1(ip, lbn, i - num - 1,
			    (ufs1_daddr_t *)0);
		if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    flags | IO_BUFLOCKED, cred, &newb)) != 0) {
			brelse(bp);
			if (DOINGSOFTDEP(vp) && ++reclaimed == 1) {
				UFS_LOCK(ump);
				softdep_request_cleanup(fs, vp, cred,
				    FLUSH_BLOCKS_WAIT);
				UFS_UNLOCK(ump);
				goto retry;
			}
			if (ppsratecheck(&lastfail, &curfail, 1)) {
				ffs_fserr(fs, ip->i_number, "filesystem full");
				uprintf("\n%s: write failed, filesystem "
				    "is full\n", fs->fs_fsmnt);
			}
			goto fail;
		}
		pref = newb + fs->fs_frag;
		nb = newb;
		MPASS(allocblk < allociblk + nitems(allociblk));
		MPASS(lbns_remfree < lbns + nitems(lbns));
		*allocblk++ = nb;
		*lbns_remfree++ = indirs[i].in_lbn;
		nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		vfs_bio_clrbuf(nbp);
		if (DOINGSOFTDEP(vp)) {
			softdep_setup_allocindir_meta(nbp, ip, bp,
			    indirs[i - 1].in_off, nb);
			bdwrite(nbp);
		} else if ((flags & IO_SYNC) == 0 && DOINGASYNC(vp)) {
			if (nbp->b_bufsize == fs->fs_bsize)
				nbp->b_flags |= B_CLUSTEROK;
			bdwrite(nbp);
		} else {
			if ((error = bwrite(nbp)) != 0) {
				brelse(bp);
				goto fail;
			}
		}
		bap[indirs[i - 1].in_off] = nb;
		if (allocib == NULL && unwindidx < 0)
			unwindidx = i - 1;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & IO_SYNC) {
			bwrite(bp);
		} else {
			if (bp->b_bufsize == fs->fs_bsize)
				bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
	}
	/*
	 * If asked only for the indirect block, then return it.
	 */
	if (flags & BA_METAONLY) {
		curthread_pflags_restore(saved_inbdflush);
		*bpp = bp;
		return (0);
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		UFS_LOCK(ump);
		/*
		 * If allocating metadata at the front of the cylinder
		 * group and parent indirect block has just been allocated,
		 * then cluster next to it if it is the first indirect in
		 * the file. Otherwise it has been allocated in the metadata
		 * area, so we want to find our own place out in the data area.
		 */
		if (pref == 0 || (lbn > NDADDR && fs->fs_metaspace != 0))
			pref = ffs_blkpref_ufs1(ip, lbn, indirs[i].in_off,
			    &bap[0]);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    flags | IO_BUFLOCKED, cred, &newb);
		if (error) {
			brelse(bp);
			if (DOINGSOFTDEP(vp) && ++reclaimed == 1) {
				UFS_LOCK(ump);
				softdep_request_cleanup(fs, vp, cred,
				    FLUSH_BLOCKS_WAIT);
				UFS_UNLOCK(ump);
				goto retry;
			}
			if (ppsratecheck(&lastfail, &curfail, 1)) {
				ffs_fserr(fs, ip->i_number, "filesystem full");
				uprintf("\n%s: write failed, filesystem "
				    "is full\n", fs->fs_fsmnt);
			}
			goto fail;
		}
		nb = newb;
		MPASS(allocblk < allociblk + nitems(allociblk));
		MPASS(lbns_remfree < lbns + nitems(lbns));
		*allocblk++ = nb;
		*lbns_remfree++ = lbn;
		nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, gbflags);
		nbp->b_blkno = fsbtodb(fs, nb);
		if (flags & BA_CLRBUF)
			vfs_bio_clrbuf(nbp);
		if (DOINGSOFTDEP(vp))
			softdep_setup_allocindir_page(ip, lbn, bp,
			    indirs[i].in_off, nb, 0, nbp);
		bap[indirs[i].in_off] = nb;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & IO_SYNC) {
			bwrite(bp);
		} else {
			if (bp->b_bufsize == fs->fs_bsize)
				bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		curthread_pflags_restore(saved_inbdflush);
		*bpp = nbp;
		return (0);
	}
	brelse(bp);
	if (flags & BA_CLRBUF) {
		int seqcount = (flags & BA_SEQMASK) >> BA_SEQSHIFT;
		if (seqcount != 0 &&
		    (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0 &&
		    !(vm_page_count_severe() || buf_dirty_count_severe())) {
			error = cluster_read(vp, ip->i_size, lbn,
			    (int)fs->fs_bsize, NOCRED,
			    MAXBSIZE, seqcount, gbflags, &nbp);
		} else {
			error = bread_gb(vp, lbn, (int)fs->fs_bsize, NOCRED,
			    gbflags, &nbp);
		}
		if (error) {
			brelse(nbp);
			goto fail;
		}
	} else {