/*
 * Select the preferred position for the next block of a file (UFS2
 * on-disk format).  The return value is a fragment address to be used
 * as an allocation hint; 0 means "no preference, any cylinder group".
 */
daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
	struct fs *fs;
	int cg;
	int avgbfree, startcg;

	fs = ip->i_fs;
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		if (lbn < UFS_NDADDR + FFS_NINDIR(fs)) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs,
				ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg < startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (0);
	}
	/*
	 * We just always try to lay things out contiguously.
	 */
	return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
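
/*
 * Illustrative sketch, not part of the kernel source: the cylinder-group
 * scan in ffs_blkpref_ufs2 boils down to "starting at startcg, take the
 * first group whose free-block count is at least the filesystem-wide
 * average, wrapping around once".  The standalone model below uses plain
 * arrays; the names (pick_cg, nbfree, ncg) are hypothetical.
 */
static int
pick_cg(const long *nbfree, int ncg, int startcg, long total_free)
{
	long avg = total_free / ncg;		/* average free blocks per group */
	int cg;

	for (cg = startcg; cg < ncg; cg++)	/* startcg .. ncg-1 */
		if (nbfree[cg] >= avg)
			return cg;
	for (cg = 0; cg < startcg; cg++)	/* wrap: 0 .. startcg-1 */
		if (nbfree[cg] >= avg)
			return cg;
	return -1;				/* no group meets the average */
}
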
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(struct vnode *ovp, off_t length, int ioflag, kauth_cred_t cred)
{
    daddr_t lastblock;
    struct inode *oip = VTOI(ovp);
    daddr_t bn, lastiblock[UFS_NIADDR], indir_lbn[UFS_NIADDR];
    daddr_t blks[UFS_NDADDR + UFS_NIADDR];
    struct fs *fs;
    int offset, pgoffset, level;
    int64_t count, blocksreleased = 0;
    int i, aflag, nblocks;
    int error, allerror = 0;
    off_t osize;
    int sync;
    struct ufsmount *ump = oip->i_ump;

    if (ovp->v_type == VCHR || ovp->v_type == VBLK ||
            ovp->v_type == VFIFO || ovp->v_type == VSOCK) {
        KASSERT(oip->i_size == 0);
        return 0;
    }

    if (length < 0)
        return (EINVAL);

    if (ovp->v_type == VLNK &&
            (oip->i_size < ump->um_maxsymlinklen ||
             (ump->um_maxsymlinklen == 0 && DIP(oip, blocks) == 0))) {
        KDASSERT(length == 0);
        memset(SHORTLINK(oip), 0, (size_t)oip->i_size);
        oip->i_size = 0;
        DIP_ASSIGN(oip, size, 0);
        oip->i_flag |= IN_CHANGE | IN_UPDATE;
        return (ffs_update(ovp, NULL, NULL, 0));
    }
    if (oip->i_size == length) {
        /* still do a uvm_vnp_setsize() as writesize may be larger */
        uvm_vnp_setsize(ovp, length);
        oip->i_flag |= IN_CHANGE | IN_UPDATE;
        return (ffs_update(ovp, NULL, NULL, 0));
    }
    fs = oip->i_fs;
    if (length > ump->um_maxfilesize)
        return (EFBIG);

    if ((oip->i_flags & SF_SNAPSHOT) != 0)
        ffs_snapremove(ovp);

    osize = oip->i_size;
    aflag = ioflag & IO_SYNC ? B_SYNC : 0;

    /*
     * Lengthen the size of the file. We must ensure that the
     * last byte of the file is allocated. Since the smallest
     * value of osize is 0, length will be at least 1.
     */

    if (osize < length) {
        if (ffs_lblkno(fs, osize) < UFS_NDADDR &&
                ffs_lblkno(fs, osize) != ffs_lblkno(fs, length) &&
                ffs_blkroundup(fs, osize) != osize) {
            off_t eob;

            eob = ffs_blkroundup(fs, osize);
            uvm_vnp_setwritesize(ovp, eob);
            error = ufs_balloc_range(ovp, osize, eob - osize,
                                     cred, aflag);
            if (error) {
                (void) ffs_truncate(ovp, osize,
                                    ioflag & IO_SYNC, cred);
                return error;
            }
            if (ioflag & IO_SYNC) {
                mutex_enter(ovp->v_interlock);
                VOP_PUTPAGES(ovp,
                             trunc_page(osize & fs->fs_bmask),
                             round_page(eob), PGO_CLEANIT | PGO_SYNCIO |
                             PGO_JOURNALLOCKED);
            }
        }
        uvm_vnp_setwritesize(ovp, length);
        error = ufs_balloc_range(ovp, length - 1, 1, cred, aflag);
        if (error) {
            (void) ffs_truncate(ovp, osize, ioflag & IO_SYNC, cred);
            return (error);
        }
        uvm_vnp_setsize(ovp, length);
        oip->i_flag |= IN_CHANGE | IN_UPDATE;
        KASSERT(ovp->v_size == oip->i_size);
        return (ffs_update(ovp, NULL, NULL, 0));
    }

    /*
     * When truncating a regular file down to a non-block-aligned size,
     * we must zero the part of last block which is past the new EOF.
     * We must synchronously flush the zeroed pages to disk
     * since the new pages will be invalidated as soon as we
     * inform the VM system of the new, smaller size.
     * We must do this before acquiring the GLOCK, since fetching
     * the pages will acquire the GLOCK internally.
     * So there is a window where another thread could see a whole
     * zeroed page past EOF, but that's life.
     */

    offset = ffs_blkoff(fs, length);
    pgoffset = length & PAGE_MASK;
    if (ovp->v_type == VREG && (pgoffset != 0 || offset != 0) &&
            osize > length) {
        daddr_t lbn;
        voff_t eoz;
        int size;

        if (offset != 0) {
            error = ufs_balloc_range(ovp, length - 1, 1, cred,
                                     aflag);
            if (error)
                return error;
        }
        lbn = ffs_lblkno(fs, length);
        size = ffs_blksize(fs, oip, lbn);
        eoz = MIN(MAX(ffs_lblktosize(fs, lbn) + size, round_page(pgoffset)),
                  osize);
        ubc_zerorange(&ovp->v_uobj, length, eoz - length,
                      UBC_UNMAP_FLAG(ovp));
        if (round_page(eoz) > round_page(length)) {
            mutex_enter(ovp->v_interlock);
            error = VOP_PUTPAGES(ovp, round_page(length),
                                 round_page(eoz),
                                 PGO_CLEANIT | PGO_DEACTIVATE | PGO_JOURNALLOCKED |
                                 ((ioflag & IO_SYNC) ? PGO_SYNCIO : 0));
            if (error)
                return error;
        }
    }

    genfs_node_wrlock(ovp);
    oip->i_size = length;
    DIP_ASSIGN(oip, size, length);
    uvm_vnp_setsize(ovp, length);
    /*
     * Calculate index into inode's block list of
     * last direct and indirect blocks (if any)
     * which we want to keep.  Lastblock is -1 when
     * the file is truncated to 0.
     */
    lastblock = ffs_lblkno(fs, length + fs->fs_bsize - 1) - 1;
    lastiblock[SINGLE] = lastblock - UFS_NDADDR;
    lastiblock[DOUBLE] = lastiblock[SINGLE] - FFS_NINDIR(fs);
    lastiblock[TRIPLE] = lastiblock[DOUBLE] - FFS_NINDIR(fs) * FFS_NINDIR(fs);
    nblocks = btodb(fs->fs_bsize);
    /*
     * Update file and block pointers on disk before we start freeing
     * blocks.  If we crash before free'ing blocks below, the blocks
     * will be returned to the free list.  lastiblock values are also
     * normalized to -1 for calls to ffs_indirtrunc below.
     */
    sync = 0;
    for (level = TRIPLE; level >= SINGLE; level--) {
        blks[UFS_NDADDR + level] = DIP(oip, ib[level]);
        if (lastiblock[level] < 0 && blks[UFS_NDADDR + level] != 0) {
            sync = 1;
            DIP_ASSIGN(oip, ib[level], 0);
            lastiblock[level] = -1;
        }
    }
    for (i = 0; i < UFS_NDADDR; i++) {
        blks[i] = DIP(oip, db[i]);
        if (i > lastblock && blks[i] != 0) {
            sync = 1;
            DIP_ASSIGN(oip, db[i], 0);
        }
    }
    oip->i_flag |= IN_CHANGE | IN_UPDATE;
    if (sync) {
        error = ffs_update(ovp, NULL, NULL, UPDATE_WAIT);
        if (error && !allerror)
            allerror = error;
    }

    /*
     * Having written the new inode to disk, save its new configuration
     * and put back the old block pointers long enough to process them.
     * Note that we save the new block configuration so we can check it
     * when we are done.
     */
    for (i = 0; i < UFS_NDADDR; i++) {
        bn = DIP(oip, db[i]);
        DIP_ASSIGN(oip, db[i], blks[i]);
        blks[i] = bn;
    }
    for (i = 0; i < UFS_NIADDR; i++) {
        bn = DIP(oip, ib[i]);
        DIP_ASSIGN(oip, ib[i], blks[UFS_NDADDR + i]);
        blks[UFS_NDADDR + i] = bn;
    }

    oip->i_size = osize;
    DIP_ASSIGN(oip, size, osize);
    error = vtruncbuf(ovp, lastblock + 1, 0, 0);
    if (error && !allerror)
        allerror = error;

    /*
     * Indirect blocks first.
     */
    indir_lbn[SINGLE] = -UFS_NDADDR;
    indir_lbn[DOUBLE] = indir_lbn[SINGLE] - FFS_NINDIR(fs) - 1;
    indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - FFS_NINDIR(fs) * FFS_NINDIR(fs) - 1;
    for (level = TRIPLE; level >= SINGLE; level--) {
        if (oip->i_ump->um_fstype == UFS1)
            bn = ufs_rw32(oip->i_ffs1_ib[level],UFS_FSNEEDSWAP(fs));
        else
            bn = ufs_rw64(oip->i_ffs2_ib[level],UFS_FSNEEDSWAP(fs));
        if (bn != 0) {
            error = ffs_indirtrunc(oip, indir_lbn[level],
                                   FFS_FSBTODB(fs, bn), lastiblock[level], level, &count);
            if (error)
                allerror = error;
            blocksreleased += count;
            if (lastiblock[level] < 0) {
                DIP_ASSIGN(oip, ib[level], 0);
                if (oip->i_ump->um_mountp->mnt_wapbl) {
                    UFS_WAPBL_REGISTER_DEALLOCATION(
                        oip->i_ump->um_mountp,
                        FFS_FSBTODB(fs, bn), fs->fs_bsize);
                } else
                    ffs_blkfree(fs, oip->i_devvp, bn,
                                fs->fs_bsize, oip->i_number);
                blocksreleased += nblocks;
            }
        }
        if (lastiblock[level] >= 0)
            goto done;
    }

    /*
     * All whole direct blocks or frags.
     */
    for (i = UFS_NDADDR - 1; i > lastblock; i--) {
        long bsize;

        if (oip->i_ump->um_fstype == UFS1)
            bn = ufs_rw32(oip->i_ffs1_db[i], UFS_FSNEEDSWAP(fs));
        else
            bn = ufs_rw64(oip->i_ffs2_db[i], UFS_FSNEEDSWAP(fs));
        if (bn == 0)
            continue;
        DIP_ASSIGN(oip, db[i], 0);
        bsize = ffs_blksize(fs, oip, i);
        if ((oip->i_ump->um_mountp->mnt_wapbl) &&
                (ovp->v_type != VREG)) {
            UFS_WAPBL_REGISTER_DEALLOCATION(oip->i_ump->um_mountp,
                                            FFS_FSBTODB(fs, bn), bsize);
        } else
            ffs_blkfree(fs, oip->i_devvp, bn, bsize, oip->i_number);
        blocksreleased += btodb(bsize);
    }
    if (lastblock < 0)
        goto done;

    /*
     * Finally, look for a change in size of the
     * last direct block; release any frags.
     */
    if (oip->i_ump->um_fstype == UFS1)
        bn = ufs_rw32(oip->i_ffs1_db[lastblock], UFS_FSNEEDSWAP(fs));
    else
        bn = ufs_rw64(oip->i_ffs2_db[lastblock], UFS_FSNEEDSWAP(fs));
    if (bn != 0) {
        long oldspace, newspace;

        /*
         * Calculate amount of space we're giving
         * back as old block size minus new block size.
         */
        oldspace = ffs_blksize(fs, oip, lastblock);
        oip->i_size = length;
        DIP_ASSIGN(oip, size, length);
        newspace = ffs_blksize(fs, oip, lastblock);
        if (newspace == 0)
            panic("itrunc: newspace");
        if (oldspace - newspace > 0) {
            /*
             * Block number of space to be free'd is
             * the old block # plus the number of frags
             * required for the storage we're keeping.
             */
            bn += ffs_numfrags(fs, newspace);
            if ((oip->i_ump->um_mountp->mnt_wapbl) &&
                    (ovp->v_type != VREG)) {
                UFS_WAPBL_REGISTER_DEALLOCATION(
                    oip->i_ump->um_mountp, FFS_FSBTODB(fs, bn),
                    oldspace - newspace);
            } else
                ffs_blkfree(fs, oip->i_devvp, bn,
                            oldspace - newspace, oip->i_number);
            blocksreleased += btodb(oldspace - newspace);
        }
    }

done:
#ifdef DIAGNOSTIC
    for (level = SINGLE; level <= TRIPLE; level++)
        if (blks[UFS_NDADDR + level] != DIP(oip, ib[level]))
            panic("itrunc1");
    for (i = 0; i < UFS_NDADDR; i++)
        if (blks[i] != DIP(oip, db[i]))
            panic("itrunc2");
    if (length == 0 &&
            (!LIST_EMPTY(&ovp->v_cleanblkhd) || !LIST_EMPTY(&ovp->v_dirtyblkhd)))
        panic("itrunc3");
#endif /* DIAGNOSTIC */
    /*
     * Put back the real size.
     */
    oip->i_size = length;
    DIP_ASSIGN(oip, size, length);
    DIP_ADD(oip, blocks, -blocksreleased);
    genfs_node_unlock(ovp);
    oip->i_flag |= IN_CHANGE;
    UFS_WAPBL_UPDATE(ovp, NULL, NULL, 0);
#if defined(QUOTA) || defined(QUOTA2)
    (void) chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
    KASSERT(ovp->v_type != VREG || ovp->v_size == oip->i_size);
    return (allerror);
}
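
/*
 * Illustrative sketch, not from the source tree: how ffs_truncate derives
 * the indices of the last direct and indirect blocks it keeps for a new
 * length.  A resulting lastblock of -1 means the file is truncated to 0;
 * a negative lastiblock[] entry means that whole indirection level goes
 * away.  Plain C using int64_t (<stdint.h>); the helper name and the
 * parameters (bsize for fs_bsize, nindir for FFS_NINDIR(fs), ndaddr for
 * UFS_NDADDR) are hypothetical.
 */
static void
last_kept_blocks(int64_t length, int64_t bsize, int64_t nindir, int ndaddr,
    int64_t *lastblock, int64_t lastiblock[3])
{
	/* index of the last logical block still covered by length */
	*lastblock = (length + bsize - 1) / bsize - 1;
	lastiblock[0] = *lastblock - ndaddr;			/* SINGLE */
	lastiblock[1] = lastiblock[0] - nindir;			/* DOUBLE */
	lastiblock[2] = lastiblock[1] - nindir * nindir;	/* TRIPLE */
}
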
/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 */
static int
ffs_indirtrunc(struct inode *ip, daddr_t lbn, daddr_t dbn, daddr_t lastbn,
               int level, int64_t *countp)
{
    int i;
    struct buf *bp;
    struct fs *fs = ip->i_fs;
    int32_t *bap1 = NULL;
    int64_t *bap2 = NULL;
    struct vnode *vp;
    daddr_t nb, nlbn, last;
    char *copy = NULL;
    int64_t blkcount, factor, blocksreleased = 0;
    int nblocks;
    int error = 0, allerror = 0;
    const int needswap = UFS_FSNEEDSWAP(fs);
#define RBAP(ip, i) (((ip)->i_ump->um_fstype == UFS1) ? \
	    ufs_rw32(bap1[i], needswap) : ufs_rw64(bap2[i], needswap))
#define BAP_ASSIGN(ip, i, value)					\
	do {								\
		if ((ip)->i_ump->um_fstype == UFS1)			\
			bap1[i] = (value);				\
		else							\
			bap2[i] = (value);				\
	} while(0)

    /*
     * Calculate index in current block of last
     * block to be kept.  -1 indicates the entire
     * block so we need not calculate the index.
     */
    factor = 1;
    for (i = SINGLE; i < level; i++)
        factor *= FFS_NINDIR(fs);
    last = lastbn;
    if (lastbn > 0)
        last /= factor;
    nblocks = btodb(fs->fs_bsize);
    /*
     * Get the buffer of block pointers, zero the entries corresponding
     * to the blocks to be freed, and update the on-disk copy first.
     * Since the double (triple) indirect pointers are cleared before the
     * single (double) indirect blocks are processed, calls to bmap on
     * these blocks will fail.  However, we already have the on-disk
     * address, so we have to set the b_blkno field explicitly instead
     * of letting bread do everything for us.
     */
    vp = ITOV(ip);
    error = ffs_getblk(vp, lbn, FFS_NOBLK, fs->fs_bsize, false, &bp);
    if (error) {
        *countp = 0;
        return error;
    }
    if (bp->b_oflags & (BO_DONE | BO_DELWRI)) {
        /* Braces must be here in case trace evaluates to nothing. */
        trace(TR_BREADHIT, pack(vp, fs->fs_bsize), lbn);
    } else {
        trace(TR_BREADMISS, pack(vp, fs->fs_bsize), lbn);
        curlwp->l_ru.ru_inblock++;	/* pay for read */
        bp->b_flags |= B_READ;
        bp->b_flags &= ~B_COWDONE;	/* we change blkno below */
        if (bp->b_bcount > bp->b_bufsize)
            panic("ffs_indirtrunc: bad buffer size");
        bp->b_blkno = dbn;
        BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
        VOP_STRATEGY(vp, bp);
        error = biowait(bp);
        if (error == 0)
            error = fscow_run(bp, true);
    }
    if (error) {
        brelse(bp, 0);
        *countp = 0;
        return (error);
    }

    if (ip->i_ump->um_fstype == UFS1)
        bap1 = (int32_t *)bp->b_data;
    else
        bap2 = (int64_t *)bp->b_data;
    if (lastbn >= 0) {
        copy = kmem_alloc(fs->fs_bsize, KM_SLEEP);
        memcpy((void *)copy, bp->b_data, (u_int)fs->fs_bsize);
        for (i = last + 1; i < FFS_NINDIR(fs); i++)
            BAP_ASSIGN(ip, i, 0);
        error = bwrite(bp);
        if (error)
            allerror = error;
        if (ip->i_ump->um_fstype == UFS1)
            bap1 = (int32_t *)copy;
        else
            bap2 = (int64_t *)copy;
    }

    /*
     * Recursively free totally unused blocks.
     */
    for (i = FFS_NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
            i--, nlbn += factor) {
        nb = RBAP(ip, i);
        if (nb == 0)
            continue;
        if (level > SINGLE) {
            error = ffs_indirtrunc(ip, nlbn, FFS_FSBTODB(fs, nb),
                                   (daddr_t)-1, level - 1,
                                   &blkcount);
            if (error)
                allerror = error;
            blocksreleased += blkcount;
        }
        if ((ip->i_ump->um_mountp->mnt_wapbl) &&
                ((level > SINGLE) || (ITOV(ip)->v_type != VREG))) {
            UFS_WAPBL_REGISTER_DEALLOCATION(ip->i_ump->um_mountp,
                                            FFS_FSBTODB(fs, nb), fs->fs_bsize);
        } else
            ffs_blkfree(fs, ip->i_devvp, nb, fs->fs_bsize,
                        ip->i_number);
        blocksreleased += nblocks;
    }

    /*
     * Recursively free last partial block.
     */
    if (level > SINGLE && lastbn >= 0) {
        last = lastbn % factor;
        nb = RBAP(ip, i);
        if (nb != 0) {
            error = ffs_indirtrunc(ip, nlbn, FFS_FSBTODB(fs, nb),
                                   last, level - 1, &blkcount);
            if (error)
                allerror = error;
            blocksreleased += blkcount;
        }
    }

    if (copy != NULL) {
        kmem_free(copy, fs->fs_bsize);
    } else {
        brelse(bp, BC_INVAL);
    }

    *countp = blocksreleased;
    return (allerror);
}
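
/*
 * Illustrative sketch, hypothetical helpers not found in the kernel: the
 * "factor" computed at the top of ffs_indirtrunc is the number of data
 * blocks addressed by one entry of an indirect block at a given level
 * (SINGLE == 0), i.e. NINDIR^level.  last_index() then maps the last
 * logical block to keep into an index within that indirect block, just
 * as the function does with "last = lastbn / factor".
 */
static int64_t
indir_factor(int64_t nindir, int level)
{
	int64_t factor = 1;
	int i;

	for (i = 0; i < level; i++)	/* SINGLE == 0, DOUBLE == 1, ... */
		factor *= nindir;
	return factor;			/* data blocks covered per entry */
}

static int64_t
last_index(int64_t lastbn, int64_t nindir, int level)
{
	/* -1 (free the entire block) passes through unchanged */
	return lastbn > 0 ? lastbn / indir_factor(nindir, level) : lastbn;
}
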
/*
 * Build the superblock for a new file system image, initialize and write
 * out its cylinder groups, and return a pointer to the in-core superblock.
 */
struct fs *
ffs_mkfs(const char *fsys, const fsinfo_t *fsopts)
{
	int fragsperinode, optimalfpg, origdensity, minfpg, lastminfpg;
	int32_t cylno, i, csfrags;
	long long sizepb;
	void *space;
	int size;
	int nprintcols, printcolwidth;
	ffs_opt_t	*ffs_opts = fsopts->fs_specific;

	Oflag =		ffs_opts->version;
	fssize =        fsopts->size / fsopts->sectorsize;
	sectorsize =    fsopts->sectorsize;
	fsize =         ffs_opts->fsize;
	bsize =         ffs_opts->bsize;
	maxbsize =      ffs_opts->maxbsize;
	maxblkspercg =  ffs_opts->maxblkspercg;
	minfree =       ffs_opts->minfree;
	opt =           ffs_opts->optimization;
	density =       ffs_opts->density;
	maxcontig =     ffs_opts->maxcontig;
	maxbpg =        ffs_opts->maxbpg;
	avgfilesize =   ffs_opts->avgfilesize;
	avgfpdir =      ffs_opts->avgfpdir;
	bbsize =        BBSIZE;
	sbsize =        SBLOCKSIZE;

	strlcpy((char *)sblock.fs_volname, ffs_opts->label,
	    sizeof(sblock.fs_volname));

	if (Oflag == 0) {
		sblock.fs_old_inodefmt = FS_42INODEFMT;
		sblock.fs_maxsymlinklen = 0;
		sblock.fs_old_flags = 0;
	} else {
		sblock.fs_old_inodefmt = FS_44INODEFMT;
		sblock.fs_maxsymlinklen = (Oflag == 1 ? UFS1_MAXSYMLINKLEN :
		    UFS2_MAXSYMLINKLEN);
		sblock.fs_old_flags = FS_FLAGS_UPDATED;
		sblock.fs_flags = 0;
	}
	/*
	 * Validate the given file system size.
	 * Verify that its last block can actually be accessed.
	 * Convert to file system fragment sized units.
	 */
	if (fssize <= 0) {
		printf("preposterous size %lld\n", (long long)fssize);
		exit(13);
	}
	ffs_wtfs(fssize - 1, sectorsize, (char *)&sblock, fsopts);

	/*
	 * collect and verify the filesystem density info
	 */
	sblock.fs_avgfilesize = avgfilesize;
	sblock.fs_avgfpdir = avgfpdir;
	if (sblock.fs_avgfilesize <= 0)
		printf("illegal expected average file size %d\n",
		    sblock.fs_avgfilesize), exit(14);
	if (sblock.fs_avgfpdir <= 0)
		printf("illegal expected number of files per directory %d\n",
		    sblock.fs_avgfpdir), exit(15);
	/*
	 * collect and verify the block and fragment sizes
	 */
	sblock.fs_bsize = bsize;
	sblock.fs_fsize = fsize;
	if (!POWEROF2(sblock.fs_bsize)) {
		printf("block size must be a power of 2, not %d\n",
		    sblock.fs_bsize);
		exit(16);
	}
	if (!POWEROF2(sblock.fs_fsize)) {
		printf("fragment size must be a power of 2, not %d\n",
		    sblock.fs_fsize);
		exit(17);
	}
	if (sblock.fs_fsize < sectorsize) {
		printf("fragment size %d is too small, minimum is %d\n",
		    sblock.fs_fsize, sectorsize);
		exit(18);
	}
	if (sblock.fs_bsize < MINBSIZE) {
		printf("block size %d is too small, minimum is %d\n",
		    sblock.fs_bsize, MINBSIZE);
		exit(19);
	}
	if (sblock.fs_bsize > FFS_MAXBSIZE) {
		printf("block size %d is too large, maximum is %d\n",
		    sblock.fs_bsize, FFS_MAXBSIZE);
		exit(19);
	}
	if (sblock.fs_bsize < sblock.fs_fsize) {
		printf("block size (%d) cannot be smaller than fragment size (%d)\n",
		    sblock.fs_bsize, sblock.fs_fsize);
		exit(20);
	}

	if (maxbsize < bsize || !POWEROF2(maxbsize)) {
		sblock.fs_maxbsize = sblock.fs_bsize;
		printf("Extent size set to %d\n", sblock.fs_maxbsize);
	} else if (sblock.fs_maxbsize > FS_MAXCONTIG * sblock.fs_bsize) {
		sblock.fs_maxbsize = FS_MAXCONTIG * sblock.fs_bsize;
		printf("Extent size reduced to %d\n", sblock.fs_maxbsize);
	} else {
		sblock.fs_maxbsize = maxbsize;
	}
	sblock.fs_maxcontig = maxcontig;
	if (sblock.fs_maxcontig < sblock.fs_maxbsize / sblock.fs_bsize) {
		sblock.fs_maxcontig = sblock.fs_maxbsize / sblock.fs_bsize;
		printf("Maxcontig raised to %d\n", sblock.fs_maxbsize);
	}

	if (sblock.fs_maxcontig > 1)
		sblock.fs_contigsumsize = MIN(sblock.fs_maxcontig,FS_MAXCONTIG);

	sblock.fs_bmask = ~(sblock.fs_bsize - 1);
	sblock.fs_fmask = ~(sblock.fs_fsize - 1);
	sblock.fs_qbmask = ~sblock.fs_bmask;
	sblock.fs_qfmask = ~sblock.fs_fmask;
	for (sblock.fs_bshift = 0, i = sblock.fs_bsize; i > 1; i >>= 1)
		sblock.fs_bshift++;
	for (sblock.fs_fshift = 0, i = sblock.fs_fsize; i > 1; i >>= 1)
		sblock.fs_fshift++;
	sblock.fs_frag = ffs_numfrags(&sblock, sblock.fs_bsize);
	for (sblock.fs_fragshift = 0, i = sblock.fs_frag; i > 1; i >>= 1)
		sblock.fs_fragshift++;
	if (sblock.fs_frag > MAXFRAG) {
		printf("fragment size %d is too small, "
			"minimum with block size %d is %d\n",
		    sblock.fs_fsize, sblock.fs_bsize,
		    sblock.fs_bsize / MAXFRAG);
		exit(21);
	}
	sblock.fs_fsbtodb = ilog2(sblock.fs_fsize / sectorsize);
	sblock.fs_size = fssize = FFS_DBTOFSB(&sblock, fssize);

	if (Oflag <= 1) {
		sblock.fs_magic = FS_UFS1_MAGIC;
		sblock.fs_sblockloc = SBLOCK_UFS1;
		sblock.fs_nindir = sblock.fs_bsize / sizeof(int32_t);
		sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs1_dinode);
		sblock.fs_maxsymlinklen = ((UFS_NDADDR + UFS_NIADDR) *
		    sizeof (int32_t));
		sblock.fs_old_inodefmt = FS_44INODEFMT;
		sblock.fs_old_cgoffset = 0;
		sblock.fs_old_cgmask = 0xffffffff;
		sblock.fs_old_size = sblock.fs_size;
		sblock.fs_old_rotdelay = 0;
		sblock.fs_old_rps = 60;
		sblock.fs_old_nspf = sblock.fs_fsize / sectorsize;
		sblock.fs_old_cpg = 1;
		sblock.fs_old_interleave = 1;
		sblock.fs_old_trackskew = 0;
		sblock.fs_old_cpc = 0;
		sblock.fs_old_postblformat = 1;
		sblock.fs_old_nrpos = 1;
	} else {
		sblock.fs_magic = FS_UFS2_MAGIC;
#if 0 /* XXX makefs is used for small filesystems. */
		sblock.fs_sblockloc = SBLOCK_UFS2;
#else
		sblock.fs_sblockloc = SBLOCK_UFS1;
#endif
		sblock.fs_nindir = sblock.fs_bsize / sizeof(int64_t);
		sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs2_dinode);
		sblock.fs_maxsymlinklen = ((UFS_NDADDR + UFS_NIADDR) *
		    sizeof (int64_t));
	}

	sblock.fs_sblkno =
	    roundup(howmany(sblock.fs_sblockloc + SBLOCKSIZE, sblock.fs_fsize),
		sblock.fs_frag);
	sblock.fs_cblkno = (daddr_t)(sblock.fs_sblkno +
	    roundup(howmany(SBLOCKSIZE, sblock.fs_fsize), sblock.fs_frag));
	sblock.fs_iblkno = sblock.fs_cblkno + sblock.fs_frag;
	sblock.fs_maxfilesize = sblock.fs_bsize * UFS_NDADDR - 1;
	for (sizepb = sblock.fs_bsize, i = 0; i < UFS_NIADDR; i++) {
		sizepb *= FFS_NINDIR(&sblock);
		sblock.fs_maxfilesize += sizepb;
	}

	/*
	 * Calculate the number of blocks to put into each cylinder group.
	 *
	 * This algorithm selects the number of blocks per cylinder
	 * group. The first goal is to have at least enough data blocks
	 * in each cylinder group to meet the density requirement. Once
	 * this goal is achieved we try to expand to have at least
	 * 1 cylinder group. Once this goal is achieved, we pack as
	 * many blocks into each cylinder group map as will fit.
	 *
	 * We start by calculating the smallest number of blocks that we
	 * can put into each cylinder group. If this is too big, we reduce
	 * the density until it fits.
	 */
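	/*
	 * Worked example with hypothetical numbers (not taken from any real
	 * invocation): with fsize = 2048, bsize = 16384 and a density of
	 * 4096 bytes of data per inode on a UFS2 image, fragsperinode =
	 * 4096 / 2048 = 2 and FFS_INOPB() = 16384 / 256 = 64, so minfpg =
	 * 2 * 64 = 128 fragments; a cylinder group smaller than that cannot
	 * supply enough data blocks for even one block of inodes at the
	 * requested density.
	 */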
	origdensity = density;
	for (;;) {
		fragsperinode = MAX(ffs_numfrags(&sblock, density), 1);
		minfpg = fragsperinode * FFS_INOPB(&sblock);
		if (minfpg > sblock.fs_size)
			minfpg = sblock.fs_size;
		sblock.fs_ipg = FFS_INOPB(&sblock);
		sblock.fs_fpg = roundup(sblock.fs_iblkno +
		    sblock.fs_ipg / FFS_INOPF(&sblock), sblock.fs_frag);
		if (sblock.fs_fpg < minfpg)
			sblock.fs_fpg = minfpg;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    FFS_INOPB(&sblock));
		sblock.fs_fpg = roundup(sblock.fs_iblkno +
		    sblock.fs_ipg / FFS_INOPF(&sblock), sblock.fs_frag);
		if (sblock.fs_fpg < minfpg)
			sblock.fs_fpg = minfpg;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    FFS_INOPB(&sblock));
		if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize)
			break;
		density -= sblock.fs_fsize;
	}
	if (density != origdensity)
		printf("density reduced from %d to %d\n", origdensity, density);

	if (maxblkspercg <= 0 || maxblkspercg >= fssize)
		maxblkspercg = fssize - 1;
	/*
	 * Start packing more blocks into the cylinder group until
	 * it cannot grow any larger, the number of cylinder groups
	 * drops below 1, or we reach the size requested.
	 */
	for ( ; sblock.fs_fpg < maxblkspercg; sblock.fs_fpg += sblock.fs_frag) {
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    FFS_INOPB(&sblock));
		if (sblock.fs_size / sblock.fs_fpg < 1)
			break;
		if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize)
			continue;
		if (CGSIZE(&sblock) == (unsigned long)sblock.fs_bsize)
			break;
		sblock.fs_fpg -= sblock.fs_frag;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    FFS_INOPB(&sblock));
		break;
	}
	/*
	 * Check to be sure that the last cylinder group has enough blocks
	 * to be viable. If it is too small, reduce the number of blocks
	 * per cylinder group which will have the effect of moving more
	 * blocks into the last cylinder group.
	 */
	optimalfpg = sblock.fs_fpg;
	for (;;) {
		sblock.fs_ncg = howmany(sblock.fs_size, sblock.fs_fpg);
		lastminfpg = roundup(sblock.fs_iblkno +
		    sblock.fs_ipg / FFS_INOPF(&sblock), sblock.fs_frag);
		if (sblock.fs_size < lastminfpg) {
			printf("Filesystem size %lld < minimum size of %d\n",
			    (long long)sblock.fs_size, lastminfpg);
			exit(28);
		}
		if (sblock.fs_size % sblock.fs_fpg >= lastminfpg ||
		    sblock.fs_size % sblock.fs_fpg == 0)
			break;
		sblock.fs_fpg -= sblock.fs_frag;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    FFS_INOPB(&sblock));
	}
	if (optimalfpg != sblock.fs_fpg)
		printf("Reduced frags per cylinder group from %d to %d %s\n",
		   optimalfpg, sblock.fs_fpg, "to enlarge last cyl group");
	sblock.fs_cgsize = ffs_fragroundup(&sblock, CGSIZE(&sblock));
	sblock.fs_dblkno = sblock.fs_iblkno + sblock.fs_ipg / FFS_INOPF(&sblock);
	if (Oflag <= 1) {
		sblock.fs_old_spc = sblock.fs_fpg * sblock.fs_old_nspf;
		sblock.fs_old_nsect = sblock.fs_old_spc;
		sblock.fs_old_npsect = sblock.fs_old_spc;
		sblock.fs_old_ncyl = sblock.fs_ncg;
	}

	/*
	 * fill in remaining fields of the super block
	 */
	sblock.fs_csaddr = cgdmin(&sblock, 0);
	sblock.fs_cssize =
	    ffs_fragroundup(&sblock, sblock.fs_ncg * sizeof(struct csum));

	/*
	 * Setup memory for temporary in-core cylgroup summaries.
	 * Cribbed from ffs_mountfs().
	 */
	size = sblock.fs_cssize;
	if (sblock.fs_contigsumsize > 0)
		size += sblock.fs_ncg * sizeof(int32_t);
	space = ecalloc(1, size);
	sblock.fs_csp = space;
	space = (char *)space + sblock.fs_cssize;
	if (sblock.fs_contigsumsize > 0) {
		int32_t *lp;

		sblock.fs_maxcluster = lp = space;
		for (i = 0; i < sblock.fs_ncg; i++)
			*lp++ = sblock.fs_contigsumsize;
	}

	sblock.fs_sbsize = ffs_fragroundup(&sblock, sizeof(struct fs));
	if (sblock.fs_sbsize > SBLOCKSIZE)
		sblock.fs_sbsize = SBLOCKSIZE;
	sblock.fs_minfree = minfree;
	sblock.fs_maxcontig = maxcontig;
	sblock.fs_maxbpg = maxbpg;
	sblock.fs_optim = opt;
	sblock.fs_cgrotor = 0;
	sblock.fs_pendingblocks = 0;
	sblock.fs_pendinginodes = 0;
	sblock.fs_cstotal.cs_ndir = 0;
	sblock.fs_cstotal.cs_nbfree = 0;
	sblock.fs_cstotal.cs_nifree = 0;
	sblock.fs_cstotal.cs_nffree = 0;
	sblock.fs_fmod = 0;
	sblock.fs_ronly = 0;
	sblock.fs_state = 0;
	sblock.fs_clean = FS_ISCLEAN;
	sblock.fs_ronly = 0;
	sblock.fs_id[0] = start_time.tv_sec;
	sblock.fs_id[1] = random();
	sblock.fs_fsmnt[0] = '\0';
	csfrags = howmany(sblock.fs_cssize, sblock.fs_fsize);
	sblock.fs_dsize = sblock.fs_size - sblock.fs_sblkno -
	    sblock.fs_ncg * (sblock.fs_dblkno - sblock.fs_sblkno);
	sblock.fs_cstotal.cs_nbfree =
	    ffs_fragstoblks(&sblock, sblock.fs_dsize) -
	    howmany(csfrags, sblock.fs_frag);
	sblock.fs_cstotal.cs_nffree =
	    ffs_fragnum(&sblock, sblock.fs_size) +
	    (ffs_fragnum(&sblock, csfrags) > 0 ?
	    sblock.fs_frag - ffs_fragnum(&sblock, csfrags) : 0);
	sblock.fs_cstotal.cs_nifree = sblock.fs_ncg * sblock.fs_ipg - UFS_ROOTINO;
	sblock.fs_cstotal.cs_ndir = 0;
	sblock.fs_dsize -= csfrags;
	sblock.fs_time = start_time.tv_sec;
	if (Oflag <= 1) {
		sblock.fs_old_time = start_time.tv_sec;
		sblock.fs_old_dsize = sblock.fs_dsize;
		sblock.fs_old_csaddr = sblock.fs_csaddr;
		sblock.fs_old_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir;
		sblock.fs_old_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree;
		sblock.fs_old_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree;
		sblock.fs_old_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree;
	}
	/*
	 * Dump out summary information about file system.
	 */
#define	B2MBFACTOR (1 / (1024.0 * 1024.0))
	printf("%s: %.1fMB (%lld sectors) block size %d, "
	       "fragment size %d\n",
	    fsys, (float)sblock.fs_size * sblock.fs_fsize * B2MBFACTOR,
	    (long long)FFS_FSBTODB(&sblock, sblock.fs_size),
	    sblock.fs_bsize, sblock.fs_fsize);
	printf("\tusing %d cylinder groups of %.2fMB, %d blks, "
	       "%d inodes.\n",
	    sblock.fs_ncg,
	    (float)sblock.fs_fpg * sblock.fs_fsize * B2MBFACTOR,
	    sblock.fs_fpg / sblock.fs_frag, sblock.fs_ipg);
#undef B2MBFACTOR
	/*
	 * Now determine how wide each column will be, and calculate how
	 * many columns will fit in a 76 char line. 76 is the width of the
	 * subwindows in sysinst.
	 */
	printcolwidth = count_digits(
	    FFS_FSBTODB(&sblock, cgsblock(&sblock, sblock.fs_ncg - 1)));
	nprintcols = 76 / (printcolwidth + 2);

	/*
	 * allocate space for superblock, cylinder group map, and
	 * two sets of inode blocks.
	 */
	if (sblock.fs_bsize < SBLOCKSIZE)
		iobufsize = SBLOCKSIZE + 3 * sblock.fs_bsize;
	else
		iobufsize = 4 * sblock.fs_bsize;
	iobuf = ecalloc(1, iobufsize);
	/*
	 * Make a copy of the superblock into the buffer that we will be
	 * writing out in each cylinder group.
	 */
	memcpy(writebuf, &sblock, sbsize);
	if (fsopts->needswap)
		ffs_sb_swap(&sblock, (struct fs*)writebuf);
	memcpy(iobuf, writebuf, SBLOCKSIZE);

	printf("super-block backups (for fsck -b #) at:");
	for (cylno = 0; cylno < sblock.fs_ncg; cylno++) {
		initcg(cylno, start_time.tv_sec, fsopts);
		if (cylno % nprintcols == 0)
			printf("\n");
		printf(" %*lld,", printcolwidth,
			(long long)FFS_FSBTODB(&sblock, cgsblock(&sblock, cylno)));
		fflush(stdout);
	}
	printf("\n");

	/*
	 * Now construct the initial file system,
	 * then write out the super-block.
	 */
	sblock.fs_time = start_time.tv_sec;
	if (Oflag <= 1) {
		sblock.fs_old_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir;
		sblock.fs_old_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree;
		sblock.fs_old_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree;
		sblock.fs_old_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree;
	}
	if (fsopts->needswap)
		sblock.fs_flags |= FS_SWAPPED;
	ffs_write_superblock(&sblock, fsopts);
	return (&sblock);
}
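
/*
 * Illustrative sketch, hypothetical helper: fs_maxfilesize, as computed in
 * the UFS_NIADDR loop near the top of ffs_mkfs, is the byte count reachable
 * through the direct blocks plus each successive level of indirection:
 * bsize*NDADDR - 1 + bsize*NINDIR + bsize*NINDIR^2 + bsize*NINDIR^3.
 * For 16 KiB blocks on UFS2 (NINDIR = 2048) that works out to roughly
 * 128 TiB.
 */
static long long
max_file_size(long long bsize, long long nindir, int ndaddr, int niaddr)
{
	long long maxsize = bsize * ndaddr - 1;	/* direct blocks */
	long long sizepb = bsize;	/* bytes per pointer at this level */
	int i;

	for (i = 0; i < niaddr; i++) {
		sizepb *= nindir;	/* one more level of indirection */
		maxsize += sizepb;
	}
	return maxsize;
}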