Example no. 1
/*
 * Initialize a cylinder group.
 */
void
initcg(int cylno, time_t utime)
{
	long blkno, start;
	off_t savedactualloc;
	uint i, j, d, dlower, dupper;
	ufs2_daddr_t cbase, dmax;
	struct ufs1_dinode *dp1;
	struct ufs2_dinode *dp2;
	struct csum *cs;

	/*
	 * Determine block bounds for cylinder group.
	 * Allow space for super block summary information in first
	 * cylinder group.
	 */
	cbase = cgbase(&sblock, cylno);
	dmax = cbase + sblock.fs_fpg;
	if (dmax > sblock.fs_size)
		dmax = sblock.fs_size;
	dlower = cgsblock(&sblock, cylno) - cbase;
	dupper = cgdmin(&sblock, cylno) - cbase;
	if (cylno == 0)
		dupper += howmany(sblock.fs_cssize, sblock.fs_fsize);
	cs = &fscs[cylno];
	memset(&acg, 0, sblock.fs_cgsize);
	acg.cg_time = utime;
	acg.cg_magic = CG_MAGIC;
	acg.cg_cgx = cylno;
	acg.cg_niblk = sblock.fs_ipg;
	acg.cg_initediblk = MIN(sblock.fs_ipg, 2 * INOPB(&sblock));
	acg.cg_ndblk = dmax - cbase;
	if (sblock.fs_contigsumsize > 0)
		acg.cg_nclusterblks = acg.cg_ndblk / sblock.fs_frag;
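	/*
	 * Lay out the bookkeeping arrays that follow the cylinder group
	 * header; offsets are relative to cg_firstfield and differ between
	 * the old (UFS1) and new (UFS2) formats.
	 */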
	start = &acg.cg_space[0] - (u_char *)(&acg.cg_firstfield);
	if (Oflag == 2) {
		acg.cg_iusedoff = start;
	} else {
		acg.cg_old_ncyl = sblock.fs_old_cpg;
		acg.cg_old_time = acg.cg_time;
		acg.cg_time = 0;
		acg.cg_old_niblk = acg.cg_niblk;
		acg.cg_niblk = 0;
		acg.cg_initediblk = 0;
		acg.cg_old_btotoff = start;
		acg.cg_old_boff = acg.cg_old_btotoff +
		    sblock.fs_old_cpg * sizeof(int32_t);
		acg.cg_iusedoff = acg.cg_old_boff +
		    sblock.fs_old_cpg * sizeof(u_int16_t);
	}
	acg.cg_freeoff = acg.cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT);
	acg.cg_nextfreeoff = acg.cg_freeoff + howmany(sblock.fs_fpg, CHAR_BIT);
	if (sblock.fs_contigsumsize > 0) {
		acg.cg_clustersumoff =
		    roundup(acg.cg_nextfreeoff, sizeof(u_int32_t));
		acg.cg_clustersumoff -= sizeof(u_int32_t);
		acg.cg_clusteroff = acg.cg_clustersumoff +
		    (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t);
		acg.cg_nextfreeoff = acg.cg_clusteroff +
		    howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT);
	}
	if (acg.cg_nextfreeoff > (unsigned)sblock.fs_cgsize) {
		printf("Panic: cylinder group too big\n");
		exit(37);
	}
	acg.cg_cs.cs_nifree += sblock.fs_ipg;
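	/* In the first cylinder group, reserve the inodes below UFS_ROOTINO. */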
	if (cylno == 0)
		for (i = 0; i < (long)UFS_ROOTINO; i++) {
			setbit(cg_inosused(&acg), i);
			acg.cg_cs.cs_nifree--;
		}
	if (cylno > 0) {
		/*
		 * In cylno 0, beginning space is reserved
		 * for boot and super blocks.
		 */
		for (d = 0; d < dlower; d += sblock.fs_frag) {
			blkno = d / sblock.fs_frag;
			setblock(&sblock, cg_blksfree(&acg), blkno);
			if (sblock.fs_contigsumsize > 0)
				setbit(cg_clustersfree(&acg), blkno);
			acg.cg_cs.cs_nbfree++;
		}
	}
	if ((i = dupper % sblock.fs_frag)) {
		acg.cg_frsum[sblock.fs_frag - i]++;
		for (d = dupper + sblock.fs_frag - i; dupper < d; dupper++) {
			setbit(cg_blksfree(&acg), dupper);
			acg.cg_cs.cs_nffree++;
		}
	}
	for (d = dupper; d + sblock.fs_frag <= acg.cg_ndblk;
	     d += sblock.fs_frag) {
		blkno = d / sblock.fs_frag;
		setblock(&sblock, cg_blksfree(&acg), blkno);
		if (sblock.fs_contigsumsize > 0)
			setbit(cg_clustersfree(&acg), blkno);
		acg.cg_cs.cs_nbfree++;
	}
	if (d < acg.cg_ndblk) {
		acg.cg_frsum[acg.cg_ndblk - d]++;
		for (; d < acg.cg_ndblk; d++) {
			setbit(cg_blksfree(&acg), d);
			acg.cg_cs.cs_nffree++;
		}
	}
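	/*
	 * Build the cluster summary: scan the cluster free map and count
	 * maximal runs of free blocks, capping each run at fs_contigsumsize.
	 */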
	if (sblock.fs_contigsumsize > 0) {
		int32_t *sump = cg_clustersum(&acg);
		u_char *mapp = cg_clustersfree(&acg);
		int map = *mapp++;
		int bit = 1;
		int run = 0;

		for (i = 0; i < acg.cg_nclusterblks; i++) {
			if ((map & bit) != 0)
				run++;
			else if (run != 0) {
				if (run > sblock.fs_contigsumsize)
					run = sblock.fs_contigsumsize;
				sump[run]++;
				run = 0;
			}
			if ((i & (CHAR_BIT - 1)) != CHAR_BIT - 1)
				bit <<= 1;
			else {
				map = *mapp++;
				bit = 1;
			}
		}
		if (run != 0) {
			if (run > sblock.fs_contigsumsize)
				run = sblock.fs_contigsumsize;
			sump[run]++;
		}
	}
	*cs = acg.cg_cs;
	/*
	 * Write out the duplicate super block. Then write the cylinder
	 * group map and two blocks worth of inodes in a single write.
	 */
	savedactualloc = sblock.fs_sblockactualloc;
	sblock.fs_sblockactualloc =
	    dbtob(fsbtodb(&sblock, cgsblock(&sblock, cylno)));
	if (sbput(disk.d_fd, &disk.d_fs, 0) != 0)
		err(1, "sbput: %s", disk.d_error);
	sblock.fs_sblockactualloc = savedactualloc;
	if (cgput(&disk, &acg) != 0)
		err(1, "initcg: cgput: %s", disk.d_error);
	start = 0;
	dp1 = (struct ufs1_dinode *)(&iobuf[start]);
	dp2 = (struct ufs2_dinode *)(&iobuf[start]);
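	/* Seed a random generation number into each preallocated inode. */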
	for (i = 0; i < acg.cg_initediblk; i++) {
		if (sblock.fs_magic == FS_UFS1_MAGIC) {
			dp1->di_gen = newfs_random();
			dp1++;
		} else {
			dp2->di_gen = newfs_random();
			dp2++;
		}
	}
	wtfs(fsbtodb(&sblock, cgimin(&sblock, cylno)), iobufsize, iobuf);
	/*
	 * For the old file system, we have to initialize all the inodes.
	 */
	if (Oflag == 1) {
		for (i = 2 * sblock.fs_frag;
		     i < sblock.fs_ipg / INOPF(&sblock);
		     i += sblock.fs_frag) {
			dp1 = (struct ufs1_dinode *)(&iobuf[start]);
			for (j = 0; j < INOPB(&sblock); j++) {
				dp1->di_gen = newfs_random();
				dp1++;
			}
			wtfs(fsbtodb(&sblock, cgimin(&sblock, cylno) + i),
			    sblock.fs_bsize, &iobuf[start]);
		}
	}
}
Example no. 2
/*
 * This computes the fields of the ext2_sb_info structure from the
 * data in the ext2_super_block structure read in.
 */
static int
compute_sb_data(struct vnode *devvp, struct ext2fs *es,
    struct m_ext2fs *fs)
{
	int db_count, error;
	int i;
	int logic_sb_block = 1;	/* XXX for now */
	struct buf *bp;
	uint32_t e2fs_descpb;

	fs->e2fs_bshift = EXT2_MIN_BLOCK_LOG_SIZE + es->e2fs_log_bsize;
	fs->e2fs_bsize = 1U << fs->e2fs_bshift;
	fs->e2fs_fsbtodb = es->e2fs_log_bsize + 1;
	fs->e2fs_qbmask = fs->e2fs_bsize - 1;
	fs->e2fs_fsize = EXT2_MIN_FRAG_SIZE << es->e2fs_log_fsize;
	if (fs->e2fs_fsize)
		fs->e2fs_fpb = fs->e2fs_bsize / fs->e2fs_fsize;
	fs->e2fs_bpg = es->e2fs_bpg;
	fs->e2fs_fpg = es->e2fs_fpg;
	fs->e2fs_ipg = es->e2fs_ipg;
	if (es->e2fs_rev == E2FS_REV0) {
		fs->e2fs_isize = E2FS_REV0_INODE_SIZE;
	} else {
		fs->e2fs_isize = es->e2fs_inode_size;

		/*
		 * Simple sanity check for superblock inode size value.
		 */
		if (EXT2_INODE_SIZE(fs) < E2FS_REV0_INODE_SIZE ||
		    EXT2_INODE_SIZE(fs) > fs->e2fs_bsize ||
		    (fs->e2fs_isize & (fs->e2fs_isize - 1)) != 0) {
			printf("ext2fs: invalid inode size %d\n",
			    fs->e2fs_isize);
			return (EIO);
		}
	}
	/* Check for extra isize in big inodes. */
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_EXTRA_ISIZE) &&
	    EXT2_INODE_SIZE(fs) < sizeof(struct ext2fs_dinode)) {
		printf("ext2fs: no space for extra inode timestamps\n");
		return (EINVAL);
	}

	fs->e2fs_ipb = fs->e2fs_bsize / EXT2_INODE_SIZE(fs);
	fs->e2fs_itpg = fs->e2fs_ipg / fs->e2fs_ipb;
	/* s_resuid / s_resgid ? */
	fs->e2fs_gcount = (es->e2fs_bcount - es->e2fs_first_dblock +
	    EXT2_BLOCKS_PER_GROUP(fs) - 1) / EXT2_BLOCKS_PER_GROUP(fs);
	e2fs_descpb = fs->e2fs_bsize / sizeof(struct ext2_gd);
	db_count = (fs->e2fs_gcount + e2fs_descpb - 1) / e2fs_descpb;
	fs->e2fs_gdbcount = db_count;
	fs->e2fs_gd = malloc(db_count * fs->e2fs_bsize,
	    M_EXT2MNT, M_WAITOK);
	fs->e2fs_contigdirs = malloc(fs->e2fs_gcount *
	    sizeof(*fs->e2fs_contigdirs), M_EXT2MNT, M_WAITOK);

	/*
	 * Adjust logic_sb_block.
	 * Godmar thinks: if the blocksize is greater than 1024, then
	 * the superblock is logically part of block zero.
	 */
	if (fs->e2fs_bsize > SBSIZE)
		logic_sb_block = 0;
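	/* Read the group descriptor table, one filesystem block at a time. */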
	for (i = 0; i < db_count; i++) {
		error = bread(devvp,
		    fsbtodb(fs, logic_sb_block + i + 1),
		    fs->e2fs_bsize, NOCRED, &bp);
		if (error) {
			free(fs->e2fs_gd, M_EXT2MNT);
			brelse(bp);
			return (error);
		}
		e2fs_cgload((struct ext2_gd *)bp->b_data,
		    &fs->e2fs_gd[
			i * fs->e2fs_bsize / sizeof(struct ext2_gd)],
		    fs->e2fs_bsize);
		brelse(bp);
		bp = NULL;
	}
	fs->e2fs_total_dir = 0;
	for (i = 0; i < fs->e2fs_gcount; i++) {
		fs->e2fs_total_dir += fs->e2fs_gd[i].ext2bgd_ndirs;
		fs->e2fs_contigdirs[i] = 0;
	}
	if (es->e2fs_rev == E2FS_REV0 ||
	    !EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_LARGEFILE))
		fs->e2fs_maxfilesize = 0x7fffffff;
	else {
		fs->e2fs_maxfilesize = 0xffffffffffff;
		if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_HUGE_FILE))
			fs->e2fs_maxfilesize = 0x7fffffffffffffff;
	}
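	/*
	 * Pick the directory hash signedness advertised by the superblock;
	 * if neither flag is set, record this machine's char signedness.
	 */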
	if (es->e4fs_flags & E2FS_UNSIGNED_HASH) {
		fs->e2fs_uhash = 3;
	} else if ((es->e4fs_flags & E2FS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
		es->e4fs_flags |= E2FS_UNSIGNED_HASH;
		fs->e2fs_uhash = 3;
#else
		es->e4fs_flags |= E2FS_SIGNED_HASH;
#endif
	}

	return (0);
}
Example no. 3
/*
 * Look up an EXT2FS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
static int
ext2_vget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{
	struct m_ext2fs *fs;
	struct inode *ip;
	struct ext2mount *ump;
	struct buf *bp;
	struct vnode *vp;
	struct cdev *dev;
	struct thread *td;
	int i, error;
	int used_blocks;

	td = curthread;
	error = vfs_hash_get(mp, ino, flags, td, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	ump = VFSTOEXT2(mp);
	dev = ump->um_dev;
	ip = malloc(sizeof(struct inode), M_EXT2NODE, M_WAITOK | M_ZERO);

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode("ext2fs", mp, &ext2_vnodeops, &vp)) != 0) {
		*vpp = NULL;
		free(ip, M_EXT2NODE);
		return (error);
	}
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_e2fs = fs = ump->um_e2fs;
	ip->i_ump = ump;
	ip->i_number = ino;

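	/*
	 * Lock the vnode and hash it before reading the on-disk inode so
	 * that concurrent lookups of the same inode block until it is
	 * fully initialized.
	 */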
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	error = insmntque(vp, mp);
	if (error != 0) {
		free(ip, M_EXT2NODE);
		*vpp = NULL;
		return (error);
	}
	error = vfs_hash_insert(vp, ino, flags, td, vpp, NULL, NULL);
	if (error || *vpp != NULL)
		return (error);

	/* Read in the disk contents for the inode, copy into the inode. */
	if ((error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->e2fs_bsize, NOCRED, &bp)) != 0) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		brelse(bp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/* convert ext2 inode to dinode */
	ext2_ei2i((struct ext2fs_dinode *)((char *)bp->b_data +
	    EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ino)), ip);
	ip->i_block_group = ino_to_cg(fs, ino);
	ip->i_next_alloc_block = 0;
	ip->i_next_alloc_goal = 0;

	/*
	 * Now we want to make sure that the block pointers for unused
	 * blocks are zeroed out - ext2_balloc depends on this, although
	 * it matters only for regular files and directories.
	 *
	 * If IN_E4EXTENTS is enabled, unused blocks are not zeroed
	 * out because we could corrupt the extent tree.
	 */
	if (!(ip->i_flag & IN_E4EXTENTS) &&
	    (S_ISDIR(ip->i_mode) || S_ISREG(ip->i_mode))) {
		used_blocks = (ip->i_size + fs->e2fs_bsize - 1) / fs->e2fs_bsize;
		for (i = used_blocks; i < EXT2_NDIR_BLOCKS; i++)
			ip->i_db[i] = 0;
	}
/*
	ext2_print_inode(ip);
*/
	bqrelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	if ((error = ext2_vinit(mp, &ext2_fifoops, &vp)) != 0) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Finish inode initialization.
	 */

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		ip->i_gen = random() + 1;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	*vpp = vp;
	return (0);
}
Example no. 4
static int32_t
ext2fs_alloccg(struct inode *ip, int cg, int32_t bpref, int size)
{
	struct m_ext2fs *fs;
	char *bbp;
	struct buf *bp;
	int error, bno, start, end, loc;

	fs = ip->i_e2fs;
	if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs,
		fs->e2fs_gd[cg].ext2bgd_b_bitmap),
		(int)fs->e2fs_bsize, &bp);
	if (error || fs->e2fs_gd[cg].ext2bgd_nbfree == 0) {
		brelse(bp);
		return (0);
	}
	bbp = (char *)bp->b_data;

	if (dtog(fs, bpref) != cg)
		bpref = 0;
	if (bpref != 0) {
		bpref = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (isclr(bbp, bpref)) {
			bno = bpref;
			goto gotit;
		}
	}
	/*
	 * No blocks in the requested cylinder, so take the next
	 * available one in this cylinder group.
	 * First try to get 8 contiguous blocks, then fall back to a single
	 * block.
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = 0;
	end = howmany(fs->e2fs.e2fs_fpg, NBBY) - start;
	for (loc = start; loc < end; loc++) {
		if (bbp[loc] == 0) {
			bno = loc * NBBY;
			goto gotit;
		}
	}
	for (loc = 0; loc < start; loc++) {
		if (bbp[loc] == 0) {
			bno = loc * NBBY;
			goto gotit;
		}
	}

	bno = ext2fs_mapsearch(fs, bbp, bpref);
	if (bno < 0)
		return (0);
gotit:
#ifdef DIAGNOSTIC
	if (isset(bbp, (long)bno)) {
		printf("ext2fs_alloccgblk: cg=%d bno=%d fs=%s\n",
			cg, bno, fs->e2fs_fsmnt);
		panic("ext2fs_alloccg: dup alloc");
	}
#endif
	setbit(bbp, (long)bno);
	fs->e2fs.e2fs_fbcount--;
	fs->e2fs_gd[cg].ext2bgd_nbfree--;
	fs->e2fs_fmod = 1;
	bdwrite(bp);
	return (cg * fs->e2fs.e2fs_fpg + fs->e2fs.e2fs_first_dblock + bno);
}
Example no. 5
static int
dumpfs(const char *name)
{
	time_t fstime;
	int64_t fssize;
	int32_t fsflags;
	int i;

	switch (disk.d_ufs) {
	case 2:
		fssize = afs.fs_size;
		fstime = afs.fs_time;
		printf("magic\t%x (UFS2)\ttime\t%s",
		    afs.fs_magic, ctime(&fstime));
		printf("superblock location\t%jd\tid\t[ %x %x ]\n",
		    (intmax_t)afs.fs_sblockloc, afs.fs_id[0], afs.fs_id[1]);
		printf("ncg\t%d\tsize\t%jd\tblocks\t%jd\n",
		    afs.fs_ncg, (intmax_t)fssize, (intmax_t)afs.fs_dsize);
		break;
	case 1:
		fssize = afs.fs_old_size;
		fstime = afs.fs_old_time;
		printf("magic\t%x (UFS1)\ttime\t%s",
		    afs.fs_magic, ctime(&fstime));
		printf("id\t[ %08x %08x ]\n", afs.fs_id[0], afs.fs_id[1]);
		printf("ncg\t%d\tsize\t%jd\tblocks\t%jd\n",
		    afs.fs_ncg, (intmax_t)fssize, (intmax_t)afs.fs_dsize);
		break;
	default:
		goto err;
	}
	printf("bsize\t%d\tshift\t%d\tmask\t0x%08x\n",
	    afs.fs_bsize, afs.fs_bshift, afs.fs_bmask);
	printf("fsize\t%d\tshift\t%d\tmask\t0x%08x\n",
	    afs.fs_fsize, afs.fs_fshift, afs.fs_fmask);
	printf("frag\t%d\tshift\t%d\tfsbtodb\t%d\n",
	    afs.fs_frag, afs.fs_fragshift, afs.fs_fsbtodb);
	printf("minfree\t%d%%\toptim\t%s\tsymlinklen %d\n",
	    afs.fs_minfree, afs.fs_optim == FS_OPTSPACE ? "space" : "time",
	    afs.fs_maxsymlinklen);
	switch (disk.d_ufs) {
	case 2:
		printf("%s %d\tmaxbpg\t%d\tmaxcontig %d\tcontigsumsize %d\n",
		    "maxbsize", afs.fs_maxbsize, afs.fs_maxbpg,
		    afs.fs_maxcontig, afs.fs_contigsumsize);
		printf("nbfree\t%jd\tndir\t%jd\tnifree\t%jd\tnffree\t%jd\n",
		    (intmax_t)afs.fs_cstotal.cs_nbfree, 
		    (intmax_t)afs.fs_cstotal.cs_ndir,
		    (intmax_t)afs.fs_cstotal.cs_nifree, 
		    (intmax_t)afs.fs_cstotal.cs_nffree);
		printf("bpg\t%d\tfpg\t%d\tipg\t%d\tunrefs\t%jd\n",
		    afs.fs_fpg / afs.fs_frag, afs.fs_fpg, afs.fs_ipg,
		    (intmax_t)afs.fs_unrefs);
		printf("nindir\t%d\tinopb\t%d\tmaxfilesize\t%ju\n",
		    afs.fs_nindir, afs.fs_inopb, 
		    (uintmax_t)afs.fs_maxfilesize);
		printf("sbsize\t%d\tcgsize\t%d\tcsaddr\t%jd\tcssize\t%d\n",
		    afs.fs_sbsize, afs.fs_cgsize, (intmax_t)afs.fs_csaddr,
		    afs.fs_cssize);
		break;
	case 1:
		printf("maxbpg\t%d\tmaxcontig %d\tcontigsumsize %d\n",
		    afs.fs_maxbpg, afs.fs_maxcontig, afs.fs_contigsumsize);
		printf("nbfree\t%d\tndir\t%d\tnifree\t%d\tnffree\t%d\n",
		    afs.fs_old_cstotal.cs_nbfree, afs.fs_old_cstotal.cs_ndir,
		    afs.fs_old_cstotal.cs_nifree, afs.fs_old_cstotal.cs_nffree);
		printf("cpg\t%d\tbpg\t%d\tfpg\t%d\tipg\t%d\n",
		    afs.fs_old_cpg, afs.fs_fpg / afs.fs_frag, afs.fs_fpg,
		    afs.fs_ipg);
		printf("nindir\t%d\tinopb\t%d\tnspf\t%d\tmaxfilesize\t%ju\n",
		    afs.fs_nindir, afs.fs_inopb, afs.fs_old_nspf,
		    (uintmax_t)afs.fs_maxfilesize);
		printf("sbsize\t%d\tcgsize\t%d\tcgoffset %d\tcgmask\t0x%08x\n",
		    afs.fs_sbsize, afs.fs_cgsize, afs.fs_old_cgoffset,
		    afs.fs_old_cgmask);
		printf("csaddr\t%d\tcssize\t%d\n",
		    afs.fs_old_csaddr, afs.fs_cssize);
		printf("rotdelay %dms\trps\t%d\ttrackskew %d\tinterleave %d\n",
		    afs.fs_old_rotdelay, afs.fs_old_rps, afs.fs_old_trackskew,
		    afs.fs_old_interleave);
		printf("nsect\t%d\tnpsect\t%d\tspc\t%d\n",
		    afs.fs_old_nsect, afs.fs_old_npsect, afs.fs_old_spc);
		break;
	default:
		goto err;
	}
	printf("sblkno\t%d\tcblkno\t%d\tiblkno\t%d\tdblkno\t%d\n",
	    afs.fs_sblkno, afs.fs_cblkno, afs.fs_iblkno, afs.fs_dblkno);
	printf("cgrotor\t%d\tfmod\t%d\tronly\t%d\tclean\t%d\n",
	    afs.fs_cgrotor, afs.fs_fmod, afs.fs_ronly, afs.fs_clean);
	printf("avgfpdir %d\tavgfilesize %d\n",
	    afs.fs_avgfpdir, afs.fs_avgfilesize);
	printf("flags\t");
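	/* Prefer the expanded flags word when the superblock has been updated. */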
	if (afs.fs_old_flags & FS_FLAGS_UPDATED)
		fsflags = afs.fs_flags;
	else
		fsflags = afs.fs_old_flags;
	if (fsflags == 0)
		printf("none");
	if (fsflags & FS_UNCLEAN)
		printf("unclean ");
	if (fsflags & FS_DOSOFTDEP)
		printf("soft-updates%s ", (fsflags & FS_SUJ) ? "+journal" : "");
	if (fsflags & FS_NEEDSFSCK)
		printf("needs fsck run ");
	if (fsflags & FS_INDEXDIRS)
		printf("indexed directories ");
	if (fsflags & FS_ACLS)
		printf("acls ");
	if (fsflags & FS_MULTILABEL)
		printf("multilabel ");
	if (fsflags & FS_GJOURNAL)
		printf("gjournal ");
	if (fsflags & FS_FLAGS_UPDATED)
		printf("fs_flags expanded ");
	if (fsflags & FS_NFS4ACLS)
		printf("nfsv4acls ");
	if (fsflags & FS_TRIM)
		printf("trim ");
	fsflags &= ~(FS_UNCLEAN | FS_DOSOFTDEP | FS_NEEDSFSCK | FS_INDEXDIRS |
		     FS_ACLS | FS_MULTILABEL | FS_GJOURNAL | FS_FLAGS_UPDATED |
		     FS_NFS4ACLS | FS_SUJ | FS_TRIM);
	if (fsflags != 0)
		printf("unknown flags (%#x)", fsflags);
	putchar('\n');
	printf("fsmnt\t%s\n", afs.fs_fsmnt);
	printf("volname\t%s\tswuid\t%ju\n",
		afs.fs_volname, (uintmax_t)afs.fs_swuid);
	printf("\ncs[].cs_(nbfree,ndir,nifree,nffree):\n\t");
	afs.fs_csp = calloc(1, afs.fs_cssize);
	if (bread(&disk, fsbtodb(&afs, afs.fs_csaddr), afs.fs_csp,
	    afs.fs_cssize) == -1)
		goto err;
	for (i = 0; i < afs.fs_ncg; i++) {
		struct csum *cs = &afs.fs_cs(&afs, i);
		if (i && i % 4 == 0)
			printf("\n\t");
		printf("(%d,%d,%d,%d) ",
		    cs->cs_nbfree, cs->cs_ndir, cs->cs_nifree, cs->cs_nffree);
	}
	printf("\n");
	if (fssize % afs.fs_fpg) {
		if (disk.d_ufs == 1)
			printf("cylinders in last group %d\n",
			    howmany(afs.fs_old_size % afs.fs_fpg,
			    afs.fs_old_spc / afs.fs_old_nspf));
		printf("blocks in last group %ld\n\n",
		    (long)((fssize % afs.fs_fpg) / afs.fs_frag));
	}
	while ((i = cgread(&disk)) != 0) {
		if (i == -1 || dumpcg())
			goto err;
	}
	return (0);

err:	ufserr(name);
	return (1);
}
Example no. 6
static int ufs_mkdir(uufsd_t *ufs, ino_t parent, ino_t inum, char *name)
{
	int		retval;
	struct ufs_vnode	*parent_vnode = NULL, *vnode = NULL;
	struct inode *parent_inode, *inode;
	ino_t		ino = inum;
	ino_t		scratch_ino;
	ufs2_daddr_t		blk;
	char			*block = 0;
	struct fs *fs = &ufs->d_fs;
	int dirsize = DIRBLKSIZ;
	int blocksize = fragroundup(fs, dirsize);

	parent_vnode = vnode_get(ufs, parent);
	if (!parent_vnode) {
		return ENOENT;
	}

	parent_inode = vnode2inode(parent_vnode);
	/*
	 * Allocate an inode, if necessary
	 */
	if (!ino) {
		retval = ufs_valloc(parent_vnode, DTTOIF(DT_DIR), &vnode);
		if (retval)
			goto cleanup;
		ino = vnode->inode.i_number;
		inode = vnode2inode(vnode);
	}

	/*
	 * Allocate a data block for the directory
	 */
	retval = ufs_block_alloc(ufs, inode, fragroundup(fs, dirsize), &blk);
	if (retval)
		goto cleanup;

	/*
	 * Create a scratch template for the directory
	 */
	retval = ufs_new_dir_block(ufs, vnode->inode.i_number, parent_vnode, &block);
	if (retval)
		goto cleanup;

	/*
	 * Get the parent's inode, if necessary (currently disabled):
	 *
	 * if (parent != ino) {
	 *	parent_vnode = vnode_get(ufs, parent);
	 *	if (retval)
	 *		goto cleanup;
	 * } else
	 *	memset(&parent_inode, 0, sizeof(parent_inode));
	 */

	/*
	 * Create the inode structure....
	 */
	inode->i_mode = DT_DIR | (0777);
	inode->i_uid = inode->i_gid = 0;
	UFS_DINODE(inode)->di_db[0] = blk;
	inode->i_nlink = 1;
	inode->i_size = dirsize;

	/*
	 * Write out the inode and inode data block
	 */
	retval = blkwrite(ufs, fsbtodb(fs, blk), block, blocksize);
	if (retval == -1)
		goto cleanup;

	/*
	 * Link the directory into the filesystem hierarchy
	 */
	if (name) {
		retval = ufs_lookup(ufs, parent, name, strlen(name),
				       &scratch_ino);
		if (!retval) {
			retval = EEXIST;
			name = 0;
			goto cleanup;
		}
		if (retval != ENOENT)
			goto cleanup;
		retval = ufs_link(ufs, parent, name, vnode, DTTOIF(DT_DIR));
		if (retval)
			goto cleanup;
	}

	/*
	 * Update parent inode's counts
	 */
	if (parent != ino) {
		parent_inode->i_nlink++;
	}

cleanup:
	if (vnode)
		vnode_put(vnode, 1);

	if (parent_vnode)
		vnode_put(parent_vnode, 1);
	if (block)
		ufs_free_mem(&block);
	return retval;
}
Example no. 7
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred,
    struct thread *td)
{
	struct vnode *ovp = vp;
	int32_t lastblock;
	struct inode *oip;
	int32_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	uint32_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	struct m_ext2fs *fs;
	struct buf *bp;
	int offset, size, level;
	e4fs_daddr_t count, nblocks, blocksreleased = 0;
	int error, i, allerror;
	off_t osize;
#ifdef INVARIANTS
	struct bufobj *bo;
#endif

	oip = VTOI(ovp);
#ifdef INVARIANTS
	bo = &ovp->v_bufobj;
#endif

	ASSERT_VOP_LOCKED(vp, "ext2_truncate");	

	if (length < 0)
		return (EINVAL);

	if (ovp->v_type == VLNK &&
	    oip->i_size < ovp->v_mount->mnt_maxsymlinklen) {
#ifdef INVARIANTS
		if (length != 0)
			panic("ext2_truncate: partial truncate of symlink");
#endif
		bzero((char *)&oip->i_shortlink, (u_int)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, 0));
	}
	fs = oip->i_e2fs;
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		if (length > oip->i_e2fs->e2fs_maxfilesize)
			return (EFBIG);
		vnode_pager_setsize(ovp, length);
		offset = blkoff(fs, length - 1);
		lbn = lblkno(fs, length - 1);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags);
		if (error) {
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		oip->i_size = length;
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, !DOINGASYNC(ovp)));
	}
	/*
	 * Shorten the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth.
	 */
	/* I don't understand the comment above */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset, cred, &bp, flags);
		if (error)
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		bzero((char *)bp->b_data + offset, (u_int)(size - offset));
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->e2fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->e2fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ext2_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[NDADDR + level] = oip->i_ib[level];
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < NDADDR; i++) {
		oldblks[i] = oip->i_db[i];
		if (i > lastblock)
			oip->i_db[i] = 0;
	}
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = ext2_update(ovp, !DOINGASYNC(ovp));

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < NDADDR; i++) {
		newblks[i] = oip->i_db[i];
		oip->i_db[i] = oldblks[i];
	}
	for (i = 0; i < NIADDR; i++) {
		newblks[NDADDR + i] = oip->i_ib[i];
		oip->i_ib[i] = oldblks[NDADDR + i];
	}
	oip->i_size = osize;
	error = vtruncbuf(ovp, cred, length, (int)fs->e2fs_bsize);
	if (error && (allerror == 0))
		allerror = error;
	vnode_pager_setsize(ovp, length);

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ext2_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ext2_blkfree(oip, bn, fs->e2fs_fsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ext2_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ext2_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ext2_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef INVARIANTS
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("itrunc2");
	BO_LOCK(bo);
	if (length == 0 && (bo->bo_dirty.bv_cnt != 0 ||
	    bo->bo_clean.bv_cnt != 0))
		panic("itrunc3");
	BO_UNLOCK(bo);
#endif /* INVARIANTS */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	if (oip->i_blocks >= blocksreleased)
		oip->i_blocks -= blocksreleased;
	else				/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
	vnode_pager_setsize(ovp, length);
	return (allerror);
}
Example no. 8
struct fs *
ffs_mkfs(const char *fsys, const fsinfo_t *fsopts)
{
	int fragsperinode, optimalfpg, origdensity, minfpg, lastminfpg;
	int32_t cylno, i, csfrags;
	long long sizepb;
	void *space;
	int size, blks;
	int nprintcols, printcolwidth;
	ffs_opt_t	*ffs_opts = fsopts->fs_specific;

	Oflag =		ffs_opts->version;
	fssize =        fsopts->size / fsopts->sectorsize;
	sectorsize =    fsopts->sectorsize;
	fsize =         ffs_opts->fsize;
	bsize =         ffs_opts->bsize;
	maxbsize =      ffs_opts->maxbsize;
	maxblkspercg =  ffs_opts->maxblkspercg;
	minfree =       ffs_opts->minfree;
	opt =           ffs_opts->optimization;
	density =       ffs_opts->density;
	maxcontig =     ffs_opts->maxcontig;
	maxbpg =        ffs_opts->maxbpg;
	avgfilesize =   ffs_opts->avgfilesize;
	avgfpdir =      ffs_opts->avgfpdir;
	bbsize =        BBSIZE;
	sbsize =        SBLOCKSIZE;

	strlcpy(sblock.fs_volname, ffs_opts->label, sizeof(sblock.fs_volname));

	if (Oflag == 0) {
		sblock.fs_old_inodefmt = FS_42INODEFMT;
		sblock.fs_maxsymlinklen = 0;
		sblock.fs_old_flags = 0;
	} else {
		sblock.fs_old_inodefmt = FS_44INODEFMT;
		sblock.fs_maxsymlinklen = (Oflag == 1 ? MAXSYMLINKLEN_UFS1 :
		    MAXSYMLINKLEN_UFS2);
		sblock.fs_old_flags = FS_FLAGS_UPDATED;
		sblock.fs_flags = 0;
	}
	/*
	 * Validate the given file system size.
	 * Verify that its last block can actually be accessed.
	 * Convert to file system fragment sized units.
	 */
	if (fssize <= 0) {
		printf("preposterous size %lld\n", (long long)fssize);
		exit(13);
	}
	ffs_wtfs(fssize - 1, sectorsize, (char *)&sblock, fsopts);

	/*
	 * collect and verify the filesystem density info
	 */
	sblock.fs_avgfilesize = avgfilesize;
	sblock.fs_avgfpdir = avgfpdir;
	if (sblock.fs_avgfilesize <= 0)
		printf("illegal expected average file size %d\n",
		    sblock.fs_avgfilesize), exit(14);
	if (sblock.fs_avgfpdir <= 0)
		printf("illegal expected number of files per directory %d\n",
		    sblock.fs_avgfpdir), exit(15);
	/*
	 * collect and verify the block and fragment sizes
	 */
	sblock.fs_bsize = bsize;
	sblock.fs_fsize = fsize;
	if (!POWEROF2(sblock.fs_bsize)) {
		printf("block size must be a power of 2, not %d\n",
		    sblock.fs_bsize);
		exit(16);
	}
	if (!POWEROF2(sblock.fs_fsize)) {
		printf("fragment size must be a power of 2, not %d\n",
		    sblock.fs_fsize);
		exit(17);
	}
	if (sblock.fs_fsize < sectorsize) {
		printf("fragment size %d is too small, minimum is %d\n",
		    sblock.fs_fsize, sectorsize);
		exit(18);
	}
	if (sblock.fs_bsize < MINBSIZE) {
		printf("block size %d is too small, minimum is %d\n",
		    sblock.fs_bsize, MINBSIZE);
		exit(19);
	}
	if (sblock.fs_bsize > FFS_MAXBSIZE) {
		printf("block size %d is too large, maximum is %d\n",
		    sblock.fs_bsize, FFS_MAXBSIZE);
		exit(19);
	}
	if (sblock.fs_bsize < sblock.fs_fsize) {
		printf("block size (%d) cannot be smaller than fragment size (%d)\n",
		    sblock.fs_bsize, sblock.fs_fsize);
		exit(20);
	}

	if (maxbsize < bsize || !POWEROF2(maxbsize)) {
		sblock.fs_maxbsize = sblock.fs_bsize;
		printf("Extent size set to %d\n", sblock.fs_maxbsize);
	} else if (sblock.fs_maxbsize > FS_MAXCONTIG * sblock.fs_bsize) {
		sblock.fs_maxbsize = FS_MAXCONTIG * sblock.fs_bsize;
		printf("Extent size reduced to %d\n", sblock.fs_maxbsize);
	} else {
		sblock.fs_maxbsize = maxbsize;
	}
	sblock.fs_maxcontig = maxcontig;
	if (sblock.fs_maxcontig < sblock.fs_maxbsize / sblock.fs_bsize) {
		sblock.fs_maxcontig = sblock.fs_maxbsize / sblock.fs_bsize;
		printf("Maxcontig raised to %d\n", sblock.fs_maxcontig);
	}

	if (sblock.fs_maxcontig > 1)
		sblock.fs_contigsumsize = MIN(sblock.fs_maxcontig,FS_MAXCONTIG);

	sblock.fs_bmask = ~(sblock.fs_bsize - 1);
	sblock.fs_fmask = ~(sblock.fs_fsize - 1);
	sblock.fs_qbmask = ~sblock.fs_bmask;
	sblock.fs_qfmask = ~sblock.fs_fmask;
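	/* Derive the shift counts from the power-of-two block and fragment sizes. */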
	for (sblock.fs_bshift = 0, i = sblock.fs_bsize; i > 1; i >>= 1)
		sblock.fs_bshift++;
	for (sblock.fs_fshift = 0, i = sblock.fs_fsize; i > 1; i >>= 1)
		sblock.fs_fshift++;
	sblock.fs_frag = numfrags(&sblock, sblock.fs_bsize);
	for (sblock.fs_fragshift = 0, i = sblock.fs_frag; i > 1; i >>= 1)
		sblock.fs_fragshift++;
	if (sblock.fs_frag > MAXFRAG) {
		printf("fragment size %d is too small, "
			"minimum with block size %d is %d\n",
		    sblock.fs_fsize, sblock.fs_bsize,
		    sblock.fs_bsize / MAXFRAG);
		exit(21);
	}
	sblock.fs_fsbtodb = ilog2(sblock.fs_fsize / sectorsize);
	sblock.fs_size = fssize = dbtofsb(&sblock, fssize);

	if (Oflag <= 1) {
		sblock.fs_magic = FS_UFS1_MAGIC;
		sblock.fs_sblockloc = SBLOCK_UFS1;
		sblock.fs_nindir = sblock.fs_bsize / sizeof(int32_t);
		sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs1_dinode);
		sblock.fs_maxsymlinklen = ((NDADDR + NIADDR) *
		    sizeof (int32_t));
		sblock.fs_old_inodefmt = FS_44INODEFMT;
		sblock.fs_old_cgoffset = 0;
		sblock.fs_old_cgmask = 0xffffffff;
		sblock.fs_old_size = sblock.fs_size;
		sblock.fs_old_rotdelay = 0;
		sblock.fs_old_rps = 60;
		sblock.fs_old_nspf = sblock.fs_fsize / sectorsize;
		sblock.fs_old_cpg = 1;
		sblock.fs_old_interleave = 1;
		sblock.fs_old_trackskew = 0;
		sblock.fs_old_cpc = 0;
		sblock.fs_old_postblformat = 1;
		sblock.fs_old_nrpos = 1;
	} else {
		sblock.fs_magic = FS_UFS2_MAGIC;
#if 0 /* XXX makefs is used for small filesystems. */
		sblock.fs_sblockloc = SBLOCK_UFS2;
#else
		sblock.fs_sblockloc = SBLOCK_UFS1;
#endif
		sblock.fs_nindir = sblock.fs_bsize / sizeof(int64_t);
		sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs2_dinode);
		sblock.fs_maxsymlinklen = ((NDADDR + NIADDR) *
		    sizeof (int64_t));
	}

	sblock.fs_sblkno =
	    roundup(howmany(sblock.fs_sblockloc + SBLOCKSIZE, sblock.fs_fsize),
		sblock.fs_frag);
	sblock.fs_cblkno = (daddr_t)(sblock.fs_sblkno +
	    roundup(howmany(SBLOCKSIZE, sblock.fs_fsize), sblock.fs_frag));
	sblock.fs_iblkno = sblock.fs_cblkno + sblock.fs_frag;
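	/*
	 * The maximum file size covers the direct blocks plus everything
	 * reachable through each level of indirect blocks.
	 */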
	sblock.fs_maxfilesize = sblock.fs_bsize * NDADDR - 1;
	for (sizepb = sblock.fs_bsize, i = 0; i < NIADDR; i++) {
		sizepb *= NINDIR(&sblock);
		sblock.fs_maxfilesize += sizepb;
	}

	/*
	 * Calculate the number of blocks to put into each cylinder group.
	 *
	 * This algorithm selects the number of blocks per cylinder
	 * group. The first goal is to have at least enough data blocks
	 * in each cylinder group to meet the density requirement. Once
	 * this goal is achieved we try to expand to have at least
	 * 1 cylinder group. Once this goal is achieved, we pack as
	 * many blocks into each cylinder group map as will fit.
	 *
	 * We start by calculating the smallest number of blocks that we
	 * can put into each cylinder group. If this is too big, we reduce
	 * the density until it fits.
	 */
	origdensity = density;
	for (;;) {
		fragsperinode = MAX(numfrags(&sblock, density), 1);
		minfpg = fragsperinode * INOPB(&sblock);
		if (minfpg > sblock.fs_size)
			minfpg = sblock.fs_size;
		sblock.fs_ipg = INOPB(&sblock);
		sblock.fs_fpg = roundup(sblock.fs_iblkno +
		    sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag);
		if (sblock.fs_fpg < minfpg)
			sblock.fs_fpg = minfpg;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
		sblock.fs_fpg = roundup(sblock.fs_iblkno +
		    sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag);
		if (sblock.fs_fpg < minfpg)
			sblock.fs_fpg = minfpg;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
		if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize)
			break;
		density -= sblock.fs_fsize;
	}
	if (density != origdensity)
		printf("density reduced from %d to %d\n", origdensity, density);

	if (maxblkspercg <= 0 || maxblkspercg >= fssize)
		maxblkspercg = fssize - 1;
	/*
	 * Start packing more blocks into the cylinder group until
	 * it cannot grow any larger, the number of cylinder groups
	 * drops below 1, or we reach the size requested.
	 */
	for ( ; sblock.fs_fpg < maxblkspercg; sblock.fs_fpg += sblock.fs_frag) {
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
		if (sblock.fs_size / sblock.fs_fpg < 1)
			break;
		if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize)
			continue;
		if (CGSIZE(&sblock) == (unsigned long)sblock.fs_bsize)
			break;
		sblock.fs_fpg -= sblock.fs_frag;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
		break;
	}
	/*
	 * Check to be sure that the last cylinder group has enough blocks
	 * to be viable. If it is too small, reduce the number of blocks
	 * per cylinder group which will have the effect of moving more
	 * blocks into the last cylinder group.
	 */
	optimalfpg = sblock.fs_fpg;
	for (;;) {
		sblock.fs_ncg = howmany(sblock.fs_size, sblock.fs_fpg);
		lastminfpg = roundup(sblock.fs_iblkno +
		    sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag);
		if (sblock.fs_size < lastminfpg) {
			printf("Filesystem size %lld < minimum size of %d\n",
			    (long long)sblock.fs_size, lastminfpg);
			exit(28);
		}
		if (sblock.fs_size % sblock.fs_fpg >= lastminfpg ||
		    sblock.fs_size % sblock.fs_fpg == 0)
			break;
		sblock.fs_fpg -= sblock.fs_frag;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
	}
	if (optimalfpg != sblock.fs_fpg)
		printf("Reduced frags per cylinder group from %d to %d %s\n",
		   optimalfpg, sblock.fs_fpg, "to enlarge last cyl group");
	sblock.fs_cgsize = fragroundup(&sblock, CGSIZE(&sblock));
	sblock.fs_dblkno = sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock);
	if (Oflag <= 1) {
		sblock.fs_old_spc = sblock.fs_fpg * sblock.fs_old_nspf;
		sblock.fs_old_nsect = sblock.fs_old_spc;
		sblock.fs_old_npsect = sblock.fs_old_spc;
		sblock.fs_old_ncyl = sblock.fs_ncg;
	}

	/*
	 * fill in remaining fields of the super block
	 */
	sblock.fs_csaddr = cgdmin(&sblock, 0);
	sblock.fs_cssize =
	    fragroundup(&sblock, sblock.fs_ncg * sizeof(struct csum));

	/*
	 * Setup memory for temporary in-core cylgroup summaries.
	 * Cribbed from ffs_mountfs().
	 */
	size = sblock.fs_cssize;
	blks = howmany(size, sblock.fs_fsize);
	if (sblock.fs_contigsumsize > 0)
		size += sblock.fs_ncg * sizeof(int32_t);
	if ((space = (char *)calloc(1, size)) == NULL)
		err(1, "memory allocation error for cg summaries");
	sblock.fs_csp = space;
	space = (char *)space + sblock.fs_cssize;
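	/* The per-cylinder-group maximum-cluster array lives right after the csum area. */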
	if (sblock.fs_contigsumsize > 0) {
		int32_t *lp;

		sblock.fs_maxcluster = lp = space;
		for (i = 0; i < sblock.fs_ncg; i++)
			*lp++ = sblock.fs_contigsumsize;
	}

	sblock.fs_sbsize = fragroundup(&sblock, sizeof(struct fs));
	if (sblock.fs_sbsize > SBLOCKSIZE)
		sblock.fs_sbsize = SBLOCKSIZE;
	sblock.fs_minfree = minfree;
	sblock.fs_maxcontig = maxcontig;
	sblock.fs_maxbpg = maxbpg;
	sblock.fs_optim = opt;
	sblock.fs_cgrotor = 0;
	sblock.fs_pendingblocks = 0;
	sblock.fs_pendinginodes = 0;
	sblock.fs_cstotal.cs_ndir = 0;
	sblock.fs_cstotal.cs_nbfree = 0;
	sblock.fs_cstotal.cs_nifree = 0;
	sblock.fs_cstotal.cs_nffree = 0;
	sblock.fs_fmod = 0;
	sblock.fs_ronly = 0;
	sblock.fs_state = 0;
	sblock.fs_clean = FS_ISCLEAN;
	sblock.fs_ronly = 0;
	sblock.fs_id[0] = start_time.tv_sec;
	sblock.fs_id[1] = random();
	sblock.fs_fsmnt[0] = '\0';
	csfrags = howmany(sblock.fs_cssize, sblock.fs_fsize);
	sblock.fs_dsize = sblock.fs_size - sblock.fs_sblkno -
	    sblock.fs_ncg * (sblock.fs_dblkno - sblock.fs_sblkno);
	sblock.fs_cstotal.cs_nbfree =
	    fragstoblks(&sblock, sblock.fs_dsize) -
	    howmany(csfrags, sblock.fs_frag);
	sblock.fs_cstotal.cs_nffree =
	    fragnum(&sblock, sblock.fs_size) +
	    (fragnum(&sblock, csfrags) > 0 ?
	    sblock.fs_frag - fragnum(&sblock, csfrags) : 0);
	sblock.fs_cstotal.cs_nifree = sblock.fs_ncg * sblock.fs_ipg - ROOTINO;
	sblock.fs_cstotal.cs_ndir = 0;
	sblock.fs_dsize -= csfrags;
	sblock.fs_time = start_time.tv_sec;
	if (Oflag <= 1) {
		sblock.fs_old_time = start_time.tv_sec;
		sblock.fs_old_dsize = sblock.fs_dsize;
		sblock.fs_old_csaddr = sblock.fs_csaddr;
		sblock.fs_old_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir;
		sblock.fs_old_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree;
		sblock.fs_old_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree;
		sblock.fs_old_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree;
	}
	/*
	 * Dump out summary information about file system.
	 */
#define	B2MBFACTOR (1 / (1024.0 * 1024.0))
	printf("%s: %.1fMB (%lld sectors) block size %d, "
	       "fragment size %d\n",
	    fsys, (float)sblock.fs_size * sblock.fs_fsize * B2MBFACTOR,
	    (long long)fsbtodb(&sblock, sblock.fs_size),
	    sblock.fs_bsize, sblock.fs_fsize);
	printf("\tusing %d cylinder groups of %.2fMB, %d blks, "
	       "%d inodes.\n",
	    sblock.fs_ncg,
	    (float)sblock.fs_fpg * sblock.fs_fsize * B2MBFACTOR,
	    sblock.fs_fpg / sblock.fs_frag, sblock.fs_ipg);
#undef B2MBFACTOR
	/*
	 * Now determine how wide each column will be, and calculate how
	 * many columns will fit in a 76 char line. 76 is the width of the
	 * subwindows in sysinst.
	 */
	printcolwidth = count_digits(
			fsbtodb(&sblock, cgsblock(&sblock, sblock.fs_ncg - 1)));
	nprintcols = 76 / (printcolwidth + 2);

	/*
	 * allocate space for superblock, cylinder group map, and
	 * two sets of inode blocks.
	 */
	if (sblock.fs_bsize < SBLOCKSIZE)
		iobufsize = SBLOCKSIZE + 3 * sblock.fs_bsize;
	else
		iobufsize = 4 * sblock.fs_bsize;
	if ((iobuf = malloc(iobufsize)) == 0) {
		printf("Cannot allocate I/O buffer\n");
		exit(38);
	}
	memset(iobuf, 0, iobufsize);
	/*
	 * Make a copy of the superblock into the buffer that we will be
	 * writing out in each cylinder group.
	 */
	memcpy(writebuf, &sblock, sbsize);
	if (fsopts->needswap)
		ffs_sb_swap(&sblock, (struct fs*)writebuf);
	memcpy(iobuf, writebuf, SBLOCKSIZE);

	printf("super-block backups (for fsck -b #) at:");
	for (cylno = 0; cylno < sblock.fs_ncg; cylno++) {
		initcg(cylno, start_time.tv_sec, fsopts);
		if (cylno % nprintcols == 0)
			printf("\n");
		printf(" %*lld,", printcolwidth,
			(long long)fsbtodb(&sblock, cgsblock(&sblock, cylno)));
		fflush(stdout);
	}
	printf("\n");

	/*
	 * Now construct the initial file system,
	 * then write out the super-block.
	 */
	sblock.fs_time = start_time.tv_sec;
	if (Oflag <= 1) {
		sblock.fs_old_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir;
		sblock.fs_old_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree;
		sblock.fs_old_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree;
		sblock.fs_old_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree;
	}
	if (fsopts->needswap)
		sblock.fs_flags |= FS_SWAPPED;
	ffs_write_superblock(&sblock, fsopts);
	return (&sblock);
}
Example no. 9
/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	struct fs *fs;
	struct inode *ip;
	struct ufs1_dinode *dp1;
#ifdef FFS2
	struct ufs2_dinode *dp2;
#endif
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;

	if (ino > (ufsino_t)-1)
		panic("ffs_vget: alien ino_t %llu", (unsigned long long)ino);

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
retry:
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_UFS, mp, &ffs_vops, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}

#ifdef VFSLCKDEBUG
	vp->v_flag |= VLOCKSWORK;
#endif
	ip = pool_get(&ffs_ino_pool, PR_WAITOK|PR_ZERO);
	lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
	ip->i_ump = ump;
	vref(ip->i_devvp);
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	ip->i_vtbl = &ffs_vtbl;

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	error = ufs_ihashins(ip);
	
	if (error) {
		/*
		 * VOP_INACTIVE will treat this as a stale file
		 * and recycle it quickly
		 */
		vrele(vp);

		if (error == EEXIST)
			goto retry;

		return (error);
	}


	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}

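	/* Copy the on-disk dinode into the in-core inode, by UFS version. */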
#ifdef FFS2
	if (ip->i_ump->um_fstype == UM_UFS2) {
		ip->i_din2 = pool_get(&ffs_dinode2_pool, PR_WAITOK);
		dp2 = (struct ufs2_dinode *) bp->b_data + ino_to_fsbo(fs, ino);
		*ip->i_din2 = *dp2;
	} else
#endif
	{
		ip->i_din1 = pool_get(&ffs_dinode1_pool, PR_WAITOK);
		dp1 = (struct ufs1_dinode *) bp->b_data + ino_to_fsbo(fs, ino);
		*ip->i_din1 = *dp1;
	}

	brelse(bp);

	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = DIP(ip, nlink);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, &ffs_specvops, FFS_FIFOOPS, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (DIP(ip, gen) == 0) {
		DIP_ASSIGN(ip, gen, arc4random() & INT_MAX);
		if (DIP(ip, gen) == 0 || DIP(ip, gen) == -1)
			DIP_ASSIGN(ip, gen, 1);	/* Shouldn't happen */
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}

	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_inodefmt < FS_44INODEFMT) {
		ip->i_ffs1_uid = ip->i_din1->di_ouid;
		ip->i_ffs1_gid = ip->i_din1->di_ogid;
	}

	*vpp = vp;

	return (0);
}
Example no. 10
/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively. The IN_MODIFIED
 * flag is used to specify that the inode needs to be updated but that the
 * times have already been set. The access and modified times are taken from
 * the second and third parameters; the inode change time is always taken
 * from the current time. If waitfor is set, then wait for the disk write
 * of the inode to complete.
 */
int
ffs_update(struct inode *ip, struct timespec *atime, 
    struct timespec *mtime, int waitfor)
{
	struct vnode *vp;
	struct fs *fs;
	struct buf *bp;
	int error;
	struct timespec ts;

	vp = ITOV(ip);
	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
		ip->i_flag &=
		    ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE);
		return (0);
	}

	if ((ip->i_flag &
	    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
	    waitfor != MNT_WAIT)
		return (0);

	getnanotime(&ts);

	if (ip->i_flag & IN_ACCESS) {
		DIP_ASSIGN(ip, atime, atime ? atime->tv_sec : ts.tv_sec);
		DIP_ASSIGN(ip, atimensec, atime ? atime->tv_nsec : ts.tv_nsec);
	}

	if (ip->i_flag & IN_UPDATE) {
		DIP_ASSIGN(ip, mtime, mtime ? mtime->tv_sec : ts.tv_sec);
		DIP_ASSIGN(ip, mtimensec, mtime ? mtime->tv_nsec : ts.tv_nsec);
		ip->i_modrev++;
	}

	if (ip->i_flag & IN_CHANGE) {
		DIP_ASSIGN(ip, ctime, ts.tv_sec);
		DIP_ASSIGN(ip, ctimensec, ts.tv_nsec);
	}

	ip->i_flag &= ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE);
	fs = ip->i_fs;

	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_inodefmt < FS_44INODEFMT) {
		ip->i_din1->di_ouid = ip->i_ffs1_uid;
		ip->i_din1->di_ogid = ip->i_ffs1_gid;
	}

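	/* Read the block that holds this inode so the updated dinode can be copied into it. */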
	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		(int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != DIP(ip, nlink))
		panic("ffs_update: bad link cnt");

#ifdef FFS2
	if (ip->i_ump->um_fstype == UM_UFS2)
		*((struct ufs2_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
	else
#endif
		*((struct ufs1_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;

	if (waitfor && !DOINGASYNC(vp)) {
		return (bwrite(bp));
	} else {
		bdwrite(bp);
		return (0);
	}
}
Example no. 11
int
ufs2_dir (char *dirname)
{
  char *rest, ch;
  unsigned long block, off, loc, ino = ROOTINO;
  grub_int64_t map;
  struct direct *dp;
  int j, k;
  char ch1;
#ifdef GRUB_UTIL
  char tmp_name[512];
#else
  char *tmp_name = (char *)(NAME_BUF);	/* MAXNAMLEN is 255, so 512 byte buffer is needed. */
#endif

/* main loop to find destination inode */
loop:

  /* load current inode (defaults to the root inode) */

    if (!devread (fsbtodb (SUPERBLOCK, ino_to_fsba (SUPERBLOCK, ino)),
	    ino % (SUPERBLOCK->fs_inopb) * sizeof (struct ufs2_dinode),
	    sizeof (struct ufs2_dinode), (char *) INODE_UFS2, 0xedde0d90))
		    return 0;			/* XXX what return value? */

  /* if we have a real file (and we're not just printing possibilities),
     then this is where we want to exit */

  if (!*dirname || isspace (*dirname))
    {
      if ((INODE_UFS2->di_mode & IFMT) != IFREG)
	{
	  errnum = ERR_BAD_FILETYPE;
	  return 0;
	}

      filemax = INODE_UFS2->di_size;

      /* incomplete implementation requires this! */
      fsmax = (NDADDR + NINDIR (SUPERBLOCK)) * SUPERBLOCK->fs_bsize;
      return 1;
    }

  /* continue with file/directory name interpretation */

  while (*dirname == '/')
    dirname++;

  if (!(INODE_UFS2->di_size) || ((INODE_UFS2->di_mode & IFMT) != IFDIR))
    {
      errnum = ERR_BAD_FILETYPE;
      return 0;
    }

  //for (rest = dirname; (ch = *rest) && !isspace (ch) && ch != '/'; rest++);
  for (rest = dirname; (ch = *rest) && !isspace (ch) && ch != '/'; rest++)
  {
	if (ch == '\\')
	{
		rest++;
		if (! (ch = *rest))
			break;
	}
  }

  *rest = 0;
  loc = 0;

  /* loop for reading the entries in a directory */

  do
    {
      if (loc >= INODE_UFS2->di_size)
	{
	  if (print_possibilities < 0)
	    return 1;

	  errnum = ERR_FILE_NOT_FOUND;
	  *rest = ch;
	  return 0;
	}

      if (!(off = blkoff (SUPERBLOCK, loc)))
	{
	  block = lblkno (SUPERBLOCK, loc);

	  if ((map = block_map (block)) < 0
	      || !devread (fsbtodb (SUPERBLOCK, map), 0,
			   blksize (SUPERBLOCK, INODE_UFS2, block),
			   (char *) FSYS_BUF, 0xedde0d90))
	    {
	      errnum = ERR_FSYS_CORRUPT;
	      *rest = ch;
	      return 0;
	    }
	}

      dp = (struct direct *) (FSYS_BUF + off);
      loc += dp->d_reclen;

	/* copy dp->name to tmp_name, and quote the spaces with a '\\' */
	for (j = 0, k = 0; j < dp->d_namlen; j++)
	{
		if (! (ch1 = dp->d_name[j]))
			break;
		if (ch1 == ' ')
			tmp_name[k++] = '\\';
		tmp_name[k++] = ch1;
	}
	tmp_name[k] = 0;

#ifndef STAGE1_5
      if (dp->d_ino && print_possibilities && ch != '/'
	  && (!*dirname || substring (dirname, tmp_name, 0) <= 0))
	{
	  if (print_possibilities > 0)
	    print_possibilities = -print_possibilities;

	  print_a_completion (tmp_name);
	}
#endif /* STAGE1_5 */
    }
  while (!dp->d_ino || (substring (dirname, dp->d_name, 0) != 0
			|| (print_possibilities && ch != '/')));

  /* only get here if we have a matching directory entry */

  ino = dp->d_ino;
  *(dirname = rest) = ch;

  /* go back to main loop at top of function */
  goto loop;
}
Example no. 12
/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 */
int
ffs_indirtrunc(struct inode *ip, daddr64_t lbn, daddr64_t dbn,
    daddr64_t lastbn, int level, long *countp)
{
	int i;
	struct buf *bp;
	struct fs *fs = ip->i_fs;
	struct vnode *vp;
	void *copy = NULL;
	daddr64_t nb, nlbn, last;
	long blkcount, factor;
	int nblocks, blocksreleased = 0;
	int error = 0, allerror = 0;
	int32_t *bap1 = NULL;
#ifdef FFS2
	int64_t *bap2 = NULL;
#endif

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * the double (triple) indirect block is freed before the single
	 * (double) indirect blocks it references, calls to bmap on these
	 * blocks will fail.  However, we already have
	 * the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0);
	if (!(bp->b_flags & (B_DONE | B_DELWRI))) {
		curproc->p_stats->p_ru.ru_inblock++;	/* pay for read */
		bcstats.pendingreads++;
		bcstats.numreads++;
		bp->b_flags |= B_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ffs_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		VOP_STRATEGY(bp);
		error = biowait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

#ifdef FFS2
	if (ip->i_ump->um_fstype == UM_UFS2)
		bap2 = (int64_t *)bp->b_data;
	else
#endif
		bap1 = (int32_t *)bp->b_data;

	if (lastbn != -1) {
		copy = malloc(fs->fs_bsize, M_TEMP, M_WAITOK);
		bcopy(bp->b_data, copy, (u_int) fs->fs_bsize);

		for (i = last + 1; i < NINDIR(fs); i++)
			BAP_ASSIGN(ip, i, 0);

		if (!DOINGASYNC(vp)) {
			error = bwrite(bp);
			if (error)
				allerror = error;
		} else {
			bawrite(bp);
		}

#ifdef FFS2
		if (ip->i_ump->um_fstype == UM_UFS2)
			bap2 = (int64_t *)copy;
		else
#endif
			bap1 = (int32_t *)copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = BAP(ip, i);
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
					       (daddr64_t)-1, level - 1,
					       &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
		ffs_blkfree(ip, nb, fs->fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = BAP(ip, i);
		if (nb != 0) {
			error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
					       last, level - 1, &blkcount);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	if (copy != NULL) {
		free(copy, M_TEMP);
	} else {
		bp->b_flags |= B_INVAL;
		brelse(bp);
	}
		
	*countp = blocksreleased;
	return (allerror);
}
Example no. 13
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(struct inode *oip, off_t length, int flags, struct ucred *cred)
{
	struct vnode *ovp;
	daddr64_t lastblock, datablocks;
	daddr64_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	daddr64_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	struct fs *fs;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, vflags, blocksreleased = 0;
	int i, aflags, error, allerror, needextclean = 0;
	off_t osize;
#ifdef FFS2
	daddr64_t extblocks;
	int softdepslowdown;
#endif

	if (length < 0)
		return (EINVAL);

	ovp = ITOV(oip);
	fs = oip->i_fs;

	if (ovp->v_type != VREG &&
	    ovp->v_type != VDIR &&
	    ovp->v_type != VLNK)
		return (0);

	/*
	 * Historically clients did not have to specify which data they were
	 * truncating. So, if not specified, we assume traditional behavior,
	 * e.g., just the normal data.
	 */
	if ((flags & (IO_EXT | IO_NORMAL)) == 0)
		flags |= IO_NORMAL;

	if (DIP(oip, size) == length && !(flags & IO_EXT))
		return (0);

	datablocks = DIP(oip, blocks);

#ifdef FFS2
	/*
	 * If we are truncating the extended-attributes, and cannot do it with
	 * soft updates, then do it slowly here. If we are truncating both the
	 * extended attributes and the file contents (e.g., the file is being
	 * unlinked), then pick it off with soft updates below.
	 */
	needextclean = 0;
	softdepslowdown = DOINGSOFTDEP(ovp) && softdep_slowdown(ovp);
	extblocks = 0;
	if (fs->fs_magic == FS_UFS2_MAGIC && oip->i_ffs2_extsize > 0) {
		extblocks = btodb(fragroundup(fs, oip->i_ffs2_extsize));
		datablocks -= extblocks;
	}
	if ((flags & IO_EXT) && extblocks > 0) {
		if (DOINGSOFTDEP(ovp) && softdepslowdown == 0 && length == 0) {
			if ((flags & IO_NORMAL) == 0) {
				softdep_setup_freeblocks(oip, length, IO_EXT);
				return (0);
			}
			needextclean = 1;
		} else {
#ifdef DIAGNOSTIC
			if (length != 0)
				panic("ffs_truncate: partial truncation of "
				    "extended attributes");
#endif
			error = VOP_FSYNC(ovp, cred, MNT_WAIT, curproc);
			if (error)
				return (error);
			osize = oip->i_ffs2_extsize;
			oip->i_ffs2_blocks -= extblocks;
			(void)ufs_quota_free_blocks(oip, extblocks, NOCRED);
			(void) vinvalbuf(ovp, V_EXT, cred, curproc, 0, 0);
			oip->i_ffs2_extsize = 0;
			for (i = 0; i < NXADDR; i++) {
				oldblks[i] = oip->i_ffs2_extb[i];
				oip->i_ffs2_extb[i] = 0;
			}
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			error = UFS_UPDATE(oip, MNT_WAIT);
			if (error)
				return (error);
			for (i = 0; i < NXADDR; i++) {
				if (oldblks[i] == 0)
					continue;
				ffs_blkfree(oip, oldblks[i],
				    sblksize(fs, osize, i));
			}
		}
	}

	if (!(flags & IO_NORMAL))
		return (0); /* Nothing else to do. */
#endif /* FFS2 */

	if (ovp->v_type == VLNK &&
	    (DIP(oip, size) < ovp->v_mount->mnt_maxsymlinklen ||
	     (ovp->v_mount->mnt_maxsymlinklen == 0 &&
	      datablocks == 0))) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		memset(SHORTLINK(oip), 0, (size_t) DIP(oip, size));
		DIP_ASSIGN(oip, size, 0);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
#ifdef FFS2
		if (needextclean)
			softdep_setup_freeblocks(oip, length, IO_EXT);
#endif
		return (UFS_UPDATE(oip, MNT_WAIT));
	}

	if ((error = getinoquota(oip)) != 0)
		return (error);

	uvm_vnp_setsize(ovp, length);
	oip->i_ci.ci_lasta = oip->i_ci.ci_clen 
	    = oip->i_ci.ci_cstart = oip->i_ci.ci_lastw = 0;

	if (DOINGSOFTDEP(ovp)) {
		if (length > 0 || softdep_slowdown(ovp)) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point. Finding and deallocating those structures
			 * is a lot of work. Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
			 */
			if ((error = VOP_FSYNC(ovp, cred, MNT_WAIT,
					       curproc)) != 0)
				return (error);
		} else {
			(void)ufs_quota_free_blocks(oip, datablocks, NOCRED);
			softdep_setup_freeblocks(oip, length, needextclean ?
			    IO_EXT | IO_NORMAL : IO_NORMAL);
			(void) vinvalbuf(ovp, needextclean ? 0 : V_NORMAL,
			    cred, curproc, 0, 0);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (UFS_UPDATE(oip, 0));
		}
	}

	osize = DIP(oip, size);
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		if (length > fs->fs_maxfilesize)
			return (EFBIG);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = UFS_BUF_ALLOC(oip, length - 1, 1, 
				   cred, aflags, &bp);
		if (error)
			return (error);
		DIP_ASSIGN(oip, size, length);
		uvm_vnp_setsize(ovp, length);
		(void) uvm_vnp_uncache(ovp);
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (UFS_UPDATE(oip, MNT_WAIT));
	}
	uvm_vnp_setsize(ovp, length);

	/*
	 * Shorten the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth. Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = blkoff(fs, length);
	if (offset == 0) {
		DIP_ASSIGN(oip, size, length);
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = UFS_BUF_ALLOC(oip, length - 1, 1,
				   cred, aflags, &bp);
		if (error)
			return (error);
		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 */
		if (DOINGSOFTDEP(ovp) && lbn < NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize &&
		    (error = VOP_FSYNC(ovp, cred, MNT_WAIT, curproc)) != 0)
			return (error);
		DIP_ASSIGN(oip, size, length);
		size = blksize(fs, oip, lbn);
		(void) uvm_vnp_uncache(ovp);
		if (ovp->v_type != VDIR)
			bzero((char *)bp->b_data + offset,
			      (u_int)(size - offset));
		bp->b_bcount = size;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);

	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[NDADDR + level] = DIP(oip, ib[level]);
		if (lastiblock[level] < 0) {
			DIP_ASSIGN(oip, ib[level], 0);
			lastiblock[level] = -1;
		}
	}

	for (i = 0; i < NDADDR; i++) {
		oldblks[i] = DIP(oip, db[i]);
		if (i > lastblock)
			DIP_ASSIGN(oip, db[i], 0);
	}

	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	if ((error = UFS_UPDATE(oip, MNT_WAIT)) != 0)
		allerror = error;

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < NDADDR; i++) {
		newblks[i] = DIP(oip, db[i]);
		DIP_ASSIGN(oip, db[i], oldblks[i]);
	}

	for (i = 0; i < NIADDR; i++) {
		newblks[NDADDR + i] = DIP(oip, ib[i]);
		DIP_ASSIGN(oip, ib[i], oldblks[NDADDR + i]);
	}

	DIP_ASSIGN(oip, size, osize);
	vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA;
	allerror = vinvalbuf(ovp, vflags, cred, curproc, 0, 0);

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = DIP(oip, ib[level]);
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				DIP_ASSIGN(oip, ib[level], 0);
				ffs_blkfree(oip, bn, fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = DIP(oip, db[i]);
		if (bn == 0)
			continue;

		DIP_ASSIGN(oip, db[i], 0);
		bsize = blksize(fs, oip, i);
		ffs_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = DIP(oip, db[lastblock]);
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		DIP_ASSIGN(oip, size, length);
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != DIP(oip, ib[level]))
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != DIP(oip, db[i]))
			panic("ffs_truncate2");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	DIP_ASSIGN(oip, size, length);
	DIP_ADD(oip, blocks, -blocksreleased);
	if (DIP(oip, blocks) < 0)	/* Sanity */
		DIP_ASSIGN(oip, blocks, 0);
	oip->i_flag |= IN_CHANGE;
	(void)ufs_quota_free_blocks(oip, blocksreleased, NOCRED);
	return (allerror);
}
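
The truncation path above derives lastblock and the lastiblock[] entries from the new length before any blocks are freed. The sketch below reproduces that arithmetic with simplified, assumed filesystem parameters (block size, direct-pointer count and indirect fan-out are made-up values, not taken from the example).

#include <stdio.h>

#define SK_BSIZE	16384	/* assumed filesystem block size */
#define SK_NDADDR	12	/* assumed number of direct pointers */
#define SK_NINDIR	2048	/* assumed pointers per indirect block */

/*
 * Index of the last data block kept at the new length (-1 when the
 * file is truncated to zero), and the corresponding last block kept
 * under each level of indirection.
 */
static void
last_blocks(long long length)
{
	long long lastblock = (length + SK_BSIZE - 1) / SK_BSIZE - 1;
	long long single = lastblock - SK_NDADDR;
	long long dbl = single - SK_NINDIR;
	long long triple = dbl - (long long)SK_NINDIR * SK_NINDIR;

	printf("len %lld: last %lld single %lld double %lld triple %lld\n",
	    length, lastblock, single, dbl, triple);
}

int
main(void)
{
	last_blocks(0);		/* everything goes: lastblock == -1 */
	last_blocks(400000);	/* a few blocks past the direct range */
	return (0);
}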
Example no. 14
0
void
fsinit(time_t utime)
{
	union dinode node;
	struct group *grp;
	gid_t gid;
	int entries;

	memset(&node, 0, sizeof node);
	if ((grp = getgrnam("operator")) != NULL) {
		gid = grp->gr_gid;
	} else {
		warnx("Cannot retrieve operator gid, using gid 0.");
		gid = 0;
	}
	entries = (nflag) ? ROOTLINKCNT - 1: ROOTLINKCNT;
	if (sblock.fs_magic == FS_UFS1_MAGIC) {
		/*
		 * initialize the node
		 */
		node.dp1.di_atime = utime;
		node.dp1.di_mtime = utime;
		node.dp1.di_ctime = utime;
		/*
		 * create the root directory
		 */
		node.dp1.di_mode = IFDIR | UMASK;
		node.dp1.di_nlink = entries;
		node.dp1.di_size = makedir(root_dir, entries);
		node.dp1.di_db[0] = alloc(sblock.fs_fsize, node.dp1.di_mode);
		node.dp1.di_blocks =
		    btodb(fragroundup(&sblock, node.dp1.di_size));
		wtfs(fsbtodb(&sblock, node.dp1.di_db[0]), sblock.fs_fsize,
		    iobuf);
		iput(&node, UFS_ROOTINO);
		if (!nflag) {
			/*
			 * create the .snap directory
			 */
			node.dp1.di_mode |= 020;
			node.dp1.di_gid = gid;
			node.dp1.di_nlink = SNAPLINKCNT;
			node.dp1.di_size = makedir(snap_dir, SNAPLINKCNT);
			node.dp1.di_db[0] =
			    alloc(sblock.fs_fsize, node.dp1.di_mode);
			node.dp1.di_blocks =
			    btodb(fragroundup(&sblock, node.dp1.di_size));
			wtfs(fsbtodb(&sblock, node.dp1.di_db[0]),
			    sblock.fs_fsize, iobuf);
			iput(&node, UFS_ROOTINO + 1);
		}
	} else {
		/*
		 * initialize the node
		 */
		node.dp2.di_atime = utime;
		node.dp2.di_mtime = utime;
		node.dp2.di_ctime = utime;
		node.dp2.di_birthtime = utime;
		/*
		 * create the root directory
		 */
		node.dp2.di_mode = IFDIR | UMASK;
		node.dp2.di_nlink = entries;
		node.dp2.di_size = makedir(root_dir, entries);
		node.dp2.di_db[0] = alloc(sblock.fs_fsize, node.dp2.di_mode);
		node.dp2.di_blocks =
		    btodb(fragroundup(&sblock, node.dp2.di_size));
		wtfs(fsbtodb(&sblock, node.dp2.di_db[0]), sblock.fs_fsize,
		    iobuf);
		iput(&node, UFS_ROOTINO);
		if (!nflag) {
			/*
			 * create the .snap directory
			 */
			node.dp2.di_mode |= 020;
			node.dp2.di_gid = gid;
			node.dp2.di_nlink = SNAPLINKCNT;
			node.dp2.di_size = makedir(snap_dir, SNAPLINKCNT);
			node.dp2.di_db[0] =
			    alloc(sblock.fs_fsize, node.dp2.di_mode);
			node.dp2.di_blocks =
			    btodb(fragroundup(&sblock, node.dp2.di_size));
			wtfs(fsbtodb(&sblock, node.dp2.di_db[0]),
			    sblock.fs_fsize, iobuf);
			iput(&node, UFS_ROOTINO + 1);
		}
	}
}
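
In both branches above, di_blocks is derived from the directory size by rounding up to a fragment boundary and converting to 512-byte sectors. A worked sketch of that arithmetic, with an assumed 2 KB fragment size (not necessarily the size mkfs would pick), is:

#include <stdio.h>

#define SK_FSIZE	2048	/* assumed fragment size */
#define SK_DEV_BSIZE	512	/* device sector size */

int
main(void)
{
	long disize = 512;	/* e.g. one small directory from makedir() */
	long rounded = ((disize + SK_FSIZE - 1) / SK_FSIZE) * SK_FSIZE;
	long sectors = rounded / SK_DEV_BSIZE;	/* what btodb() would yield */

	/* 512 bytes occupy one 2048-byte fragment, i.e. 4 sectors. */
	printf("di_size %ld -> fragroundup %ld -> di_blocks %ld\n",
	    disize, rounded, sectors);
	return (0);
}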
Example no. 15
0
/*
 * Find a suitable location for the journal in the filesystem.
 *
 * Our strategy here is to look for a contiguous block of free space
 * at least "logfile" MB in size (plus room for any indirect blocks).
 * We start at the middle of the filesystem and check each cylinder
 * group working outwards.  If "logfile" MB is not available as a
 * single contiguous chunk, then return the address and size of the
 * largest chunk found.
 *
 * XXX
 * At what stage should the search fail?  Is it reasonable to give up if
 * the largest space we can find is less than a quarter of the requested
 * space?  If the search fails entirely, a block address of "0" is
 * returned to indicate this.
 */
void
wapbl_find_log_start(struct mount *mp, struct vnode *vp, off_t logsize,
    daddr_t *addr, daddr_t *indir_addr, size_t *size)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs = ump->um_fs;
	struct vnode *devvp = ump->um_devvp;
	struct cg *cgp;
	struct buf *bp;
	uint8_t *blksfree;
	daddr_t blkno, best_addr, start_addr;
	daddr_t desired_blks, min_desired_blks;
	daddr_t freeblks, best_blks;
	int bpcg, cg, error, fixedsize, indir_blks, n, s;
#ifdef FFS_EI
	const int needswap = UFS_FSNEEDSWAP(fs);
#endif

	if (logsize == 0) {
		fixedsize = 0;	/* We can adjust the size if tight */
		logsize = lfragtosize(fs, fs->fs_dsize) /
		    UFS_WAPBL_JOURNAL_SCALE;
		DPRINTF("suggested log size = %lld\n", logsize);
		logsize = max(logsize, UFS_WAPBL_MIN_JOURNAL_SIZE);
		logsize = min(logsize, UFS_WAPBL_MAX_JOURNAL_SIZE);
		DPRINTF("adjusted log size = %lld\n", logsize);
	} else {
		fixedsize = 1;
		DPRINTF("fixed log size = %lld\n", logsize);
	}

	desired_blks = logsize / fs->fs_bsize;
	DPRINTF("desired blocks = %lld\n", desired_blks);

	/* add in number of indirect blocks needed */
	indir_blks = 0;
	if (desired_blks >= NDADDR) {
		struct indir indirs[NIADDR + 2];
		int num;

		error = ufs_getlbns(vp, desired_blks, indirs, &num);
		if (error) {
			printf("%s: ufs_getlbns failed, error %d!\n",
			    __func__, error);
			goto bad;
		}

		switch (num) {
		case 2:
			indir_blks = 1;		/* 1st level indirect */
			break;
		case 3:
			indir_blks = 1 +	/* 1st level indirect */
			    1 +			/* 2nd level indirect */
			    indirs[1].in_off + 1; /* extra 1st level indirect */
			break;
		default:
			printf("%s: unexpected numlevels %d from ufs_getlbns\n",
			    __func__, num);
			*size = 0;
			goto bad;
		}
		desired_blks += indir_blks;
	}
	DPRINTF("desired blocks = %lld (including indirect)\n",
	    desired_blks);

	/*
	 * If a specific size wasn't requested, allow for a smaller log
	 * if we're really tight for space...
	 */
	min_desired_blks = desired_blks;
	if (!fixedsize)
		min_desired_blks = desired_blks / 4;

	/* Look at number of blocks per CG.  If it's too small, bail early. */
	bpcg = fragstoblks(fs, fs->fs_fpg);
	if (min_desired_blks > bpcg) {
		printf("ffs_wapbl: cylinder group size of %lld MB "
		    " is not big enough for journal\n",
		    lblktosize(fs, bpcg) / (1024 * 1024));
		goto bad;
	}

	/*
	 * Start with the middle cylinder group, and search outwards in
	 * both directions until we either find the requested log size
	 * or reach the start/end of the file system.  If we reach the
	 * start/end without finding enough space for the full requested
	 * log size, use the largest extent found if it is large enough
	 * to satisfy our minimum size.
	 *
	 * XXX
	 * Can we just use the cluster contigsum stuff (esp on UFS2)
	 * here to simplify this search code?
	 */
	best_addr = 0;
	best_blks = 0;
	for (cg = fs->fs_ncg / 2, s = 0, n = 1;
	    best_blks < desired_blks && cg >= 0 && cg < fs->fs_ncg;
	    s++, n = -n, cg += n * s) {
		DPRINTF("check cg %d of %d\n", cg, fs->fs_ncg);
		error = bread(devvp, fsbtodb(fs, cgtod(fs, cg)),
		    fs->fs_cgsize, &bp);
		if (error) {
			continue;
		}
		cgp = (struct cg *)bp->b_data;
		if (!cg_chkmagic(cgp)) {
			brelse(bp);
			continue;
		}

		blksfree = cg_blksfree(cgp);

		for (blkno = 0; blkno < bpcg;) {
			/* look for next free block */
			/* XXX use scanc() and fragtbl[] here? */
			for (; blkno < bpcg - min_desired_blks; blkno++)
				if (ffs_isblock(fs, blksfree, blkno))
					break;

			/* past end of search space in this CG? */
			if (blkno >= bpcg - min_desired_blks)
				break;

			/* count how many free blocks in this extent */
			start_addr = blkno;
			for (freeblks = 0; blkno < bpcg; blkno++, freeblks++)
				if (!ffs_isblock(fs, blksfree, blkno))
					break;

			if (freeblks > best_blks) {
				best_blks = freeblks;
				best_addr = blkstofrags(fs, start_addr) +
				    cgbase(fs, cg);

				if (freeblks >= desired_blks) {
					DPRINTF("found len %lld"
					    " at offset %lld in gc\n",
					    freeblks, start_addr);
					break;
				}
			}
		}
		brelse(bp);
	}
	DPRINTF("best found len = %lld, wanted %lld"
	    " at addr %lld\n", best_blks, desired_blks, best_addr);

	if (best_blks < min_desired_blks) {
		*addr = 0;
		*indir_addr = 0;
	} else {
		/* put indirect blocks at start, and data blocks after */
		*addr = best_addr + blkstofrags(fs, indir_blks);
		*indir_addr = best_addr;
	}
	*size = min(desired_blks, best_blks) - indir_blks;
	return;

bad:
	*addr = 0;
	*indir_addr = 0;
	*size = 0;
	return;
}
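
The "s++, n = -n, cg += n * s" update in the search loop above produces a middle-out visiting order over the cylinder groups. The standalone sketch below prints that order for an assumed group count of 8; the real loop also stops early once an extent of the desired size has been found.

#include <stdio.h>

int
main(void)
{
	int ncg = 8;	/* assumed number of cylinder groups */
	int cg, s, n;

	/* Start in the middle, then alternate outwards: 4 3 5 2 6 1 7 0. */
	for (cg = ncg / 2, s = 0, n = 1;
	    cg >= 0 && cg < ncg;
	    s++, n = -n, cg += n * s)
		printf("visit cg %d\n", cg);
	return (0);
}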
Example no. 16
0
/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(struct ufsmount *mp, int waitfor)
{
	struct fs *dfs, *fs = mp->um_fs;
	struct buf *bp;
	int blks;
	caddr_t space;
	int i, size, error, allerror = 0;

	/*
	 * First write back the summary information.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = (caddr_t)fs->fs_csp;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
			    size, 0, 0);
		memcpy(bp->b_data, space, size);
		space += size;
		if (waitfor != MNT_WAIT)
			bawrite(bp);
		else if ((error = bwrite(bp)))
			allerror = error;
	}

	/*
	 * Now write back the superblock itself. If any errors occurred
	 * up to this point, then fail so that the superblock avoids
	 * being written out as clean.
	 */
	if (allerror) {
		return (allerror);
	}

	bp = getblk(mp->um_devvp,
	    fs->fs_sblockloc >> (fs->fs_fshift - fs->fs_fsbtodb),
	    (int)fs->fs_sbsize, 0, 0);
	fs->fs_fmod = 0;
	fs->fs_time = time_second;
	memcpy(bp->b_data, fs, fs->fs_sbsize);
	/* Restore compatibility to old file systems.		   XXX */
	dfs = (struct fs *)bp->b_data;				/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		dfs->fs_nrpos = -1;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		int32_t *lp, tmp;				/* XXX */
								/* XXX */
		lp = (int32_t *)&dfs->fs_qbmask;		/* XXX */
		tmp = lp[4];					/* XXX */
		for (i = 4; i > 0; i--)				/* XXX */
			lp[i] = lp[i-1];			/* XXX */
		lp[0] = tmp;					/* XXX */
	}							/* XXX */
	dfs->fs_maxfilesize = mp->um_savedmaxfilesize;		/* XXX */

	ffs1_compat_write(dfs, mp);

	if (waitfor != MNT_WAIT)
		bawrite(bp);
	else if ((error = bwrite(bp)))
		allerror = error;

	return (allerror);
}
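
The summary-information loop above pushes blks fragments out one full block at a time, trimming the final write to whatever remains. A small sketch of that chunking with assumed sizes (2 KB fragments, 8 fragments per block, 21 summary fragments):

#include <stdio.h>

int
main(void)
{
	int fsize = 2048, frag = 8, bsize = fsize * frag;	/* assumed */
	int blks = 21;		/* assumed howmany(fs_cssize, fs_fsize) */
	int i, size;

	for (i = 0; i < blks; i += frag) {
		size = bsize;
		if (i + frag > blks)
			size = (blks - i) * fsize;	/* short tail write */
		printf("write fragments %d..%d (%d bytes)\n",
		    i, i + size / fsize - 1, size);
	}
	return (0);
}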
Example no. 17
0
int
main(int argc, char *argv[])
{
	ino_t ino;
	int dirty;
	union dinode *dp;
	struct	fstab *dt;
	char *map;
	int ch, mode;
	struct tm then;
	struct statfs fsbuf;
	int i, anydirskipped, bflag = 0, Tflag = 0, honorlevel = 1;
	ino_t maxino;
	time_t t;
	int dirlist;
	char *toplevel, *str, *mount_point = NULL;

	spcl.c_date = (int64_t)time(NULL);

	tsize = 0;	/* Default later, based on 'c' option for cart tapes */
	if ((tape = getenv("TAPE")) == NULL)
		tape = _PATH_DEFTAPE;
	dumpdates = _PATH_DUMPDATES;
	temp = _PATH_DTMP;
	if (TP_BSIZE / DEV_BSIZE == 0 || TP_BSIZE % DEV_BSIZE != 0)
		quit("TP_BSIZE must be a multiple of DEV_BSIZE\n");
	level = '0';

	if (argc < 2)
		usage();

	obsolete(&argc, &argv);
	while ((ch = getopt(argc, argv, "0123456789aB:b:cd:f:h:ns:T:uWw")) != -1)
		switch (ch) {
		/* dump level */
		case '0': case '1': case '2': case '3': case '4':
		case '5': case '6': case '7': case '8': case '9':
			level = ch;
			break;

		case 'B':		/* blocks per output file */
			blocksperfile = numarg("blocks per file", 1L, 0L);
			break;

		case 'b':		/* blocks per tape write */
			ntrec = numarg("blocks per write", 1L, 1000L);
			if (ntrec > maxbsize/1024) {
				msg("Please choose a blocksize <= %dKB\n",
				    maxbsize/1024);
				exit(X_STARTUP);
			}
			bflag = 1;
			break;

		case 'c':		/* Tape is cart. not 9-track */
			cartridge = 1;
			break;

		case 'd':		/* density, in bits per inch */
			density = numarg("density", 10L, 327670L) / 10;
			if (density >= 625 && !bflag)
				ntrec = HIGHDENSITYTREC;
			break;

		case 'f':		/* output file */
			tape = optarg;
			break;

		case 'h':
			honorlevel = numarg("honor level", 0L, 10L);
			break;

		case 'n':		/* notify operators */
			notify = 1;
			break;

		case 's':		/* tape size, feet */
			tsize = numarg("tape size", 1L, 0L) * 12 * 10;
			break;

		case 'T':		/* time of last dump */
			str = strptime(optarg, "%a %b %e %H:%M:%S %Y", &then);
			then.tm_isdst = -1;
			if (str == NULL || (*str != '\n' && *str != '\0'))
				spcl.c_ddate = -1;
			else
				spcl.c_ddate = (int64_t)mktime(&then);
			if (spcl.c_ddate < 0) {
				(void)fprintf(stderr, "bad time \"%s\"\n",
				    optarg);
				exit(X_STARTUP);
			}
			Tflag = 1;
			lastlevel = '?';
			break;

		case 'u':		/* update /etc/dumpdates */
			uflag = 1;
			break;

		case 'W':		/* what to do */
		case 'w':
			lastdump(ch);
			exit(X_FINOK);	/* do nothing else */
			break;

		case 'a':		/* `auto-size', Write to EOM. */
			unlimited = 1;
			break;

		default:
			usage();
		}
	argc -= optind;
	argv += optind;

	if (argc < 1) {
		(void)fprintf(stderr, "Must specify disk or filesystem\n");
		exit(X_STARTUP);
	}

	/*
	 *	determine if disk is a subdirectory, and setup appropriately
	 */
	dirlist = 0;
	toplevel = NULL;
	for (i = 0; i < argc; i++) {
		struct stat sb;

		if (lstat(argv[i], &sb) == -1) {
			msg("Cannot lstat %s: %s\n", argv[i], strerror(errno));
			exit(X_STARTUP);
		}
		if (!S_ISDIR(sb.st_mode) && !S_ISREG(sb.st_mode))
			break;
		if (statfs(argv[i], &fsbuf) == -1) {
			msg("Cannot statfs %s: %s\n", argv[i], strerror(errno));
			exit(X_STARTUP);
		}
		if (strcmp(argv[i], fsbuf.f_mntonname) == 0) {
			if (dirlist != 0) {
				msg("Can't dump a mountpoint and a filelist\n");
				exit(X_STARTUP);
			}
			break;		/* exit if sole mountpoint */
		}
		if (!disk) {
			if ((toplevel = strdup(fsbuf.f_mntonname)) == NULL) {
				msg("Cannot malloc diskname\n");
				exit(X_STARTUP);
			}
			disk = toplevel;
			if (uflag) {
				msg("Ignoring u flag for subdir dump\n");
				uflag = 0;
			}
			if (level > '0') {
				msg("Subdir dump is done at level 0\n");
				level = '0';
			}
			msg("Dumping sub files/directories from %s\n", disk);
		} else {
			if (strcmp(disk, fsbuf.f_mntonname) != 0) {
				msg("%s is not on %s\n", argv[i], disk);
				exit(X_STARTUP);
			}
		}
		msg("Dumping file/directory %s\n", argv[i]);
		dirlist++;
	}
	if (dirlist == 0) {
		disk = *argv++;
		if (argc != 1) {
			(void)fputs("Excess arguments to dump:", stderr);
			while (--argc) {
				(void)putc(' ', stderr);
				(void)fputs(*argv++, stderr);
			}
			(void)putc('\n', stderr);
			exit(X_STARTUP);
		}
	}
	if (Tflag && uflag) {
	        (void)fprintf(stderr,
		    "You cannot use the T and u flags together.\n");
		exit(X_STARTUP);
	}
	if (strcmp(tape, "-") == 0) {
		pipeout++;
		tape = "standard output";
	}

	if (blocksperfile)
		blocksperfile = blocksperfile / ntrec * ntrec; /* round down */
	else if (!unlimited) {
		/*
		 * Determine how to default tape size and density
		 *
		 *         	density				tape size
		 * 9-track	1600 bpi (160 bytes/.1")	2300 ft.
		 * 9-track	6250 bpi (625 bytes/.1")	2300 ft.
		 * cartridge	8000 bpi (100 bytes/.1")	1700 ft.
		 *						(450*4 - slop)
		 */
		if (density == 0)
			density = cartridge ? 100 : 160;
		if (tsize == 0)
			tsize = cartridge ? 1700L*120L : 2300L*120L;
	}

	if (strchr(tape, ':')) {
		host = tape;
		tape = strchr(host, ':');
		*tape++ = '\0';
#ifdef RDUMP
		if (rmthost(host) == 0)
			exit(X_STARTUP);
#else
		(void)fprintf(stderr, "remote dump not enabled\n");
		exit(X_STARTUP);
#endif
	}

	if (signal(SIGHUP, SIG_IGN) != SIG_IGN)
		signal(SIGHUP, sig);
	if (signal(SIGTRAP, SIG_IGN) != SIG_IGN)
		signal(SIGTRAP, sig);
	if (signal(SIGFPE, SIG_IGN) != SIG_IGN)
		signal(SIGFPE, sig);
	if (signal(SIGBUS, SIG_IGN) != SIG_IGN)
		signal(SIGBUS, sig);
	if (signal(SIGSEGV, SIG_IGN) != SIG_IGN)
		signal(SIGSEGV, sig);
	if (signal(SIGTERM, SIG_IGN) != SIG_IGN)
		signal(SIGTERM, sig);
	if (signal(SIGINT, interrupt) == SIG_IGN)
		signal(SIGINT, SIG_IGN);

	getfstab();		/* /etc/fstab snarfed */

	/*
	 *	disk can be either the full special file name,
	 *	the suffix of the special file name,
	 *	the special name missing the leading '/',
	 *	the file system name with or without the leading '/'.
	 */
	if (!statfs(disk, &fsbuf) && !strcmp(fsbuf.f_mntonname, disk)) {
		/* mounted disk? */
		disk = rawname(fsbuf.f_mntfromname);
		if (!disk) {
			(void)fprintf(stderr, "cannot get raw name for %s\n",
			    fsbuf.f_mntfromname);
			exit(X_STARTUP);
		}
		mount_point = fsbuf.f_mntonname;
		(void)strlcpy(spcl.c_dev, fsbuf.f_mntfromname,
		    sizeof(spcl.c_dev));
		if (dirlist != 0) {
			(void)snprintf(spcl.c_filesys, sizeof(spcl.c_filesys),
			    "a subset of %s", mount_point);
		} else {
			(void)strlcpy(spcl.c_filesys, mount_point,
			    sizeof(spcl.c_filesys));
		}
	} else if ((dt = fstabsearch(disk)) != NULL) {
		/* in fstab? */
		disk = rawname(dt->fs_spec);
		mount_point = dt->fs_file;
		(void)strlcpy(spcl.c_dev, dt->fs_spec, sizeof(spcl.c_dev));
		if (dirlist != 0) {
			(void)snprintf(spcl.c_filesys, sizeof(spcl.c_filesys),
			    "a subset of %s", mount_point);
		} else {
			(void)strlcpy(spcl.c_filesys, mount_point,
			    sizeof(spcl.c_filesys));
		}
	} else {
		/* must be a device */
		(void)strlcpy(spcl.c_dev, disk, sizeof(spcl.c_dev));
		(void)strlcpy(spcl.c_filesys, "an unlisted file system",
		    sizeof(spcl.c_filesys));
	}
	(void)strlcpy(spcl.c_label, "none", sizeof(spcl.c_label));
	(void)gethostname(spcl.c_host, sizeof(spcl.c_host));
	spcl.c_level = level - '0';
	spcl.c_type = TS_TAPE;
	if (!Tflag)
	        getdumptime();		/* /etc/dumpdates snarfed */

	t = (time_t)spcl.c_date;
	msg("Date of this level %c dump: %s", level,
		t == 0 ? "the epoch\n" : ctime(&t));
	t = (time_t)spcl.c_ddate;
 	msg("Date of last level %c dump: %s", lastlevel,
		t == 0 ? "the epoch\n" : ctime(&t));
	msg("Dumping %s ", disk);
	if (mount_point != NULL)
		msgtail("(%s) ", mount_point);
	if (host)
		msgtail("to %s on host %s\n", tape, host);
	else
		msgtail("to %s\n", tape);

	if ((diskfd = open(disk, O_RDONLY)) < 0) {
		msg("Cannot open %s\n", disk);
		exit(X_STARTUP);
	}
	sync();
	sblock = (struct fs *)sblock_buf;
	for (i = 0; sblock_try[i] != -1; i++) {
		ssize_t n = pread(diskfd, sblock, SBLOCKSIZE,
		    (off_t)sblock_try[i]);
		if (n == SBLOCKSIZE && (sblock->fs_magic == FS_UFS1_MAGIC ||
		     (sblock->fs_magic == FS_UFS2_MAGIC &&
		      sblock->fs_sblockloc == sblock_try[i])) &&
		    sblock->fs_bsize <= MAXBSIZE &&
		    sblock->fs_bsize >= sizeof(struct fs))
			break;
	}
	if (sblock_try[i] == -1)
		quit("Cannot find filesystem superblock\n");
	dev_bsize = sblock->fs_fsize / fsbtodb(sblock, 1);
	dev_bshift = ffs(dev_bsize) - 1;
	if (dev_bsize != (1 << dev_bshift))
		quit("dev_bsize (%d) is not a power of 2\n", dev_bsize);
	tp_bshift = ffs(TP_BSIZE) - 1;
	if (TP_BSIZE != (1 << tp_bshift))
		quit("TP_BSIZE (%d) is not a power of 2\n", TP_BSIZE);
#ifdef FS_44INODEFMT
	if (sblock->fs_magic == FS_UFS2_MAGIC ||
	    sblock->fs_inodefmt >= FS_44INODEFMT)
		spcl.c_flags |= DR_NEWINODEFMT;
#endif
	maxino = sblock->fs_ipg * sblock->fs_ncg;
	mapsize = roundup(howmany(maxino, NBBY), TP_BSIZE);
	usedinomap = (char *)calloc((unsigned) mapsize, sizeof(char));
	dumpdirmap = (char *)calloc((unsigned) mapsize, sizeof(char));
	dumpinomap = (char *)calloc((unsigned) mapsize, sizeof(char));
	tapesize = 3 * (howmany(mapsize * sizeof(char), TP_BSIZE) + 1);

	nonodump = spcl.c_level < honorlevel;

	(void)signal(SIGINFO, statussig);

	msg("mapping (Pass I) [regular files]\n");
	anydirskipped = mapfiles(maxino, &tapesize, toplevel,
	    (dirlist ? argv : NULL));

	msg("mapping (Pass II) [directories]\n");
	while (anydirskipped) {
		anydirskipped = mapdirs(maxino, &tapesize);
	}

	if (pipeout || unlimited) {
		tapesize += 10;	/* 10 trailer blocks */
		msg("estimated %lld tape blocks.\n", tapesize);
	} else {
		double fetapes;

		if (blocksperfile)
			fetapes = (double) tapesize / blocksperfile;
		else if (cartridge) {
			/* Estimate number of tapes, assuming streaming stops at
			   the end of each block written, and not in mid-block.
			   Assume no erroneous blocks; this can be compensated
			   for with an artificially low tape size. */
			fetapes =
			(	  tapesize	/* blocks */
				* TP_BSIZE	/* bytes/block */
				* (1.0/density)	/* 0.1" / byte */
			  +
				  tapesize	/* blocks */
				* (1.0/ntrec)	/* streaming-stops per block */
				* 15.48		/* 0.1" / streaming-stop */
			) * (1.0 / tsize );	/* tape / 0.1" */
		} else {
			/* Estimate number of tapes, for old fashioned 9-track
			   tape */
			int tenthsperirg = (density == 625) ? 3 : 7;
			fetapes =
			(	  tapesize	/* blocks */
				* TP_BSIZE	/* bytes / block */
				* (1.0/density)	/* 0.1" / byte */
			  +
				  tapesize	/* blocks */
				* (1.0/ntrec)	/* IRG's / block */
				* tenthsperirg	/* 0.1" / IRG */
			) * (1.0 / tsize );	/* tape / 0.1" */
		}
		etapes = fetapes;		/* truncating assignment */
		etapes++;
		/* count the dumped inodes map on each additional tape */
		tapesize += (etapes - 1) *
			(howmany(mapsize * sizeof(char), TP_BSIZE) + 1);
		tapesize += etapes + 10;	/* headers + 10 trailer blks */
		msg("estimated %lld tape blocks on %3.2f tape(s).\n",
		    tapesize, fetapes);
	}

	/*
	 * Allocate tape buffer.
	 */
	if (!alloctape())
		quit("can't allocate tape buffers - try a smaller blocking factor.\n");

	startnewtape(1);
	(void)time((time_t *)&(tstart_writing));
	xferrate = 0;
	dumpmap(usedinomap, TS_CLRI, maxino - 1);

	msg("dumping (Pass III) [directories]\n");
	dirty = 0;		/* XXX just to get gcc to shut up */
	for (map = dumpdirmap, ino = 1; ino < maxino; ino++) {
		if (((ino - 1) % NBBY) == 0)	/* map is offset by 1 */
			dirty = *map++;
		else
			dirty >>= 1;
		if ((dirty & 1) == 0)
			continue;
		/*
		 * Skip directory inodes deleted and maybe reallocated
		 */
		dp = getino(ino, &mode);
		if (mode != IFDIR)
			continue;
		(void)dumpino(dp, ino);
	}

	msg("dumping (Pass IV) [regular files]\n");
	for (map = dumpinomap, ino = 1; ino < maxino; ino++) {
		if (((ino - 1) % NBBY) == 0)	/* map is offset by 1 */
			dirty = *map++;
		else
			dirty >>= 1;
		if ((dirty & 1) == 0)
			continue;
		/*
		 * Skip inodes deleted and reallocated as directories.
		 */
		dp = getino(ino, &mode);
		if (mode == IFDIR)
			continue;
		(void)dumpino(dp, ino);
	}

	spcl.c_type = TS_END;
	for (i = 0; i < ntrec; i++)
		writeheader(maxino - 1);
	if (pipeout)
		msg("%lld tape blocks\n", spcl.c_tapea);
	else
		msg("%lld tape blocks on %d volume%s\n",
		    spcl.c_tapea, spcl.c_volume,
		    (spcl.c_volume == 1) ? "" : "s");
	t = (time_t)spcl.c_date;
	msg("Date of this level %c dump: %s", level,
	    t == 0 ? "the epoch\n" : ctime(&t));
	t = do_stats();
	msg("Date this dump completed:  %s", ctime(&t));
	msg("Average transfer rate: %ld KB/s\n", xferrate / tapeno);
	putdumptime();
	trewind();
	broadcast("DUMP IS DONE!\7\7\n");
	msg("DUMP IS DONE\n");
	Exit(X_FINOK);
	/* NOTREACHED */
}
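
The cartridge branch of the tape estimate above adds the data length to the per-write streaming-stop overhead and divides by the tape length, all in tenths of an inch. A worked sketch with sample numbers follows; the block count, blocking factor and tape length here are assumptions for illustration, not defaults taken from the program.

#include <stdio.h>

int
main(void)
{
	double tapesize = 100000.0;	/* assumed estimated tape blocks */
	double tp_bsize = 1024.0;	/* bytes per tape record */
	double density = 100.0;		/* cartridge: 100 bytes per 0.1" */
	double ntrec = 10.0;		/* assumed records per write */
	double tsize = 1700.0 * 120.0;	/* 1700 ft in tenths of an inch */
	double fetapes;

	fetapes = (tapesize * tp_bsize * (1.0 / density) +
	    tapesize * (1.0 / ntrec) * 15.48) * (1.0 / tsize);
	printf("estimated %.2f tape(s)\n", fetapes);	/* about 5.8 */
	return (0);
}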
Example no. 18
0
/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mountp, struct ucred *cred, struct proc *p)
{
	struct vnode *devvp;
	caddr_t space;
	struct fs *fs, *newfs;
	int i, blks, size, error;
	int32_t *lp;
	struct buf *bp = NULL;
	struct ffs_reload_args fra;

	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mountp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		panic("ffs_reload: dirty1");

	/*
	 * Step 2: re-read superblock from disk.
	 */
	fs = VFSTOUFS(mountp)->um_fs;

	error = bread(devvp, fs->fs_sblockloc / DEV_BSIZE, SBSIZE, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	newfs = (struct fs *)bp->b_data;
	if (ffs_validate(newfs) == 0) {
		brelse(bp);
		return (EINVAL);
	}

	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	newfs->fs_csp = fs->fs_csp;
	newfs->fs_maxcluster = fs->fs_maxcluster;
	newfs->fs_ronly = fs->fs_ronly;
	memcpy(fs, newfs, fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs1_compat_read(fs, VFSTOUFS(mountp), fs->fs_sblockloc);
	ffs_oldfscompat(fs);
	(void)ffs_statfs(mountp, &mountp->mnt_stat, p);
	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = (caddr_t)fs->fs_csp;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
		memcpy(space, bp->b_data, size);
		space += size;
		brelse(bp);
	}
	if ((fs->fs_flags & FS_DOSOFTDEP))
		(void) softdep_mount(devvp, mountp, fs, cred);
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

	fra.p = p;
	fra.cred = cred;
	fra.fs = fs;
	fra.devvp = devvp;

	error = vfs_mount_foreach_vnode(mountp, ffs_reload_vnode, &fra);

	return (error);
}
Example no. 19
0
/*
 * Read information about /boot's inode, then put this and filesystem
 * parameters from the superblock into pbr_symbols.
 */
static int
getbootparams(char *boot, int devfd, struct disklabel *dl)
{
	int		fd;
	struct stat	statbuf, sb;
	struct statfs	statfsbuf;
	struct partition *pp;
	struct fs	*fs;
	char		*buf;
	u_int		blk, *ap;
	struct ufs1_dinode	*ip1;
	struct ufs2_dinode	*ip2;
	int		ndb;
	int		mib[3];
	size_t		size;
	dev_t		dev;
	int		skew;

	/*
	 * Open 2nd-level boot program and record enough details about
	 * where it is on the filesystem represented by `devfd'
	 * (inode block, offset within that block, and various filesystem
	 * parameters essentially taken from the superblock) for biosboot
	 * to be able to load it later.
	 */

	/* Make sure the (probably new) boot file is on disk. */
	sync(); sleep(1);

	if ((fd = open(boot, O_RDONLY)) < 0)
		err(1, "open: %s", boot);

	if (fstatfs(fd, &statfsbuf) != 0)
		err(1, "statfs: %s", boot);

	if (strncmp(statfsbuf.f_fstypename, "ffs", MFSNAMELEN) &&
	    strncmp(statfsbuf.f_fstypename, "ufs", MFSNAMELEN) )
		errx(1, "%s: not on an FFS filesystem", boot);

#if 0
	if (read(fd, &eh, sizeof(eh)) != sizeof(eh))
		errx(1, "read: %s", boot);

	if (!IS_ELF(eh)) {
		errx(1, "%s: bad magic: 0x%02x%02x%02x%02x",
		    boot,
		    eh.e_ident[EI_MAG0], eh.e_ident[EI_MAG1],
		    eh.e_ident[EI_MAG2], eh.e_ident[EI_MAG3]);
	}
#endif

	if (fsync(fd) != 0)
		err(1, "fsync: %s", boot);

	if (fstat(fd, &statbuf) != 0)
		err(1, "fstat: %s", boot);

	if (fstat(devfd, &sb) != 0)
		err(1, "fstat: %s", realdev);

	/* Check devices. */
	mib[0] = CTL_MACHDEP;
	mib[1] = CPU_CHR2BLK;
	mib[2] = sb.st_rdev;
	size = sizeof(dev);
	if (sysctl(mib, 3, &dev, &size, NULL, 0) >= 0) {
		if (statbuf.st_dev / MAXPARTITIONS != dev / MAXPARTITIONS)
			errx(1, "cross-device install");
	}

	pp = &dl->d_partitions[DISKPART(statbuf.st_dev)];
	close(fd);

	sbread(devfd, DL_SECTOBLK(dl, DL_GETPOFFSET(pp)), &fs);

	/* Read inode. */
	if ((buf = malloc(fs->fs_bsize)) == NULL)
		err(1, NULL);

	blk = fsbtodb(fs, ino_to_fsba(fs, statbuf.st_ino));

	/*
	 * Have the inode.  Figure out how many filesystem blocks (not disk
	 * sectors) there are for biosboot to load.
	 */
	devread(devfd, buf, DL_SECTOBLK(dl, pp->p_offset) + blk,
	    fs->fs_bsize, "inode");
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		ip2 = (struct ufs2_dinode *)(buf) +
		    ino_to_fsbo(fs, statbuf.st_ino);
		ndb = howmany(ip2->di_size, fs->fs_bsize);
		ap = (u_int *)ip2->di_db;
		skew = sizeof(u_int32_t);
	} else {
		ip1 = (struct ufs1_dinode *)(buf) +
		    ino_to_fsbo(fs, statbuf.st_ino);
		ndb = howmany(ip1->di_size, fs->fs_bsize);
		ap = (u_int *)ip1->di_db;
		skew = 0;
	}
	if (ndb <= 0)
		errx(1, "No blocks to load");

	/*
	 * Now set the values that will need to go into biosboot
	 * (the partition boot record, a.k.a. the PBR).
	 */
	sym_set_value(pbr_symbols, "_fs_bsize_p", (fs->fs_bsize / 16));
	sym_set_value(pbr_symbols, "_fs_bsize_s", (fs->fs_bsize / 
	    dl->d_secsize));

	/*
	 * fs_fsbtodb is the shift to convert fs_fsize to DEV_BSIZE. The
	 * ino_to_fsba() return value is the number of fs_fsize units.
	 * Calculate the shift to convert fs_fsize into physical sectors,
	 * which are added to p_offset to get the sector address BIOS
	 * will use.
	 *
	 * N.B.: ASSUMES fs_fsize is a power of 2 of d_secsize.
	 */
	sym_set_value(pbr_symbols, "_fsbtodb",
	    ffs(fs->fs_fsize / dl->d_secsize) - 1);

	if (pp->p_offseth != 0)
		errx(1, "partition offset too high");
	sym_set_value(pbr_symbols, "_p_offset", pp->p_offset);
	sym_set_value(pbr_symbols, "_inodeblk",
	    ino_to_fsba(fs, statbuf.st_ino));
	sym_set_value(pbr_symbols, "_inodedbl",
	    ((((char *)ap) - buf) + INODEOFF));
	sym_set_value(pbr_symbols, "_nblocks", ndb);
	sym_set_value(pbr_symbols, "_blkskew", skew);

	if (verbose) {
		fprintf(stderr, "%s is %d blocks x %d bytes\n",
		    boot, ndb, fs->fs_bsize);
		fprintf(stderr, "fs block shift %u; part offset %llu; "
		    "inode block %lld, offset %u\n",
		    ffs(fs->fs_fsize / dl->d_secsize) - 1,
		    DL_GETPOFFSET(pp), ino_to_fsba(fs, statbuf.st_ino),
		    (unsigned int)((((char *)ap) - buf) + INODEOFF));
		fprintf(stderr, "expecting %d-bit fs blocks (skew %d)\n",
		    skew ? 64 : 32, skew);
	}

	return 0;
}
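
The _fsbtodb symbol above is the shift that turns fragment counts into sector counts, computed as ffs(fs_fsize / d_secsize) - 1. A small sketch with assumed sizes (2 KB fragments, 512-byte sectors), relying on the same power-of-two requirement noted in the comment:

#include <stdio.h>
#include <strings.h>	/* ffs(3) */

int
main(void)
{
	int fsize = 2048, secsize = 512;	/* assumed sizes */
	int shift = ffs(fsize / secsize) - 1;	/* 2048/512 = 4 -> shift 2 */

	/* 10 fragments = 20480 bytes = 40 sectors. */
	printf("shift %d: 10 frags -> %d sectors\n", shift, 10 << shift);
	return (0);
}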
Example no. 20
0
/*
 * Common code for mount and mountroot
 */
int
ffs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p)
{
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	dev_t dev;
	caddr_t space;
	daddr_t sbloc;
	int error, i, blks, size, ronly;
	int32_t *lp;
	struct ucred *cred;
	u_int64_t maxfilesize;					/* XXX */

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);

	bp = NULL;
	ump = NULL;

	/*
	 * Try reading the super-block in each of its possible locations.
	 */
	for (i = 0; sbtry[i] != -1; i++) {
		if (bp != NULL) {
			bp->b_flags |= B_NOCACHE;
			brelse(bp);
			bp = NULL;
		}

		error = bread(devvp, sbtry[i] / DEV_BSIZE, SBSIZE, &bp);
		if (error)
			goto out;

		fs = (struct fs *) bp->b_data;
		sbloc = sbtry[i];

#if 0
		if (fs->fs_magic == FS_UFS2_MAGIC) {
			printf("ffs_mountfs(): Sorry, no UFS2 support (yet)\n");
			error = EFTYPE;
			goto out;
		}
#endif

		/*
		 * Do not look for an FFS1 file system at SBLOCK_UFS2. Doing so
		 * will find the wrong super-block for file systems with 64k
		 * block size.
		 */
		if (fs->fs_magic == FS_UFS1_MAGIC && sbloc == SBLOCK_UFS2)
			continue;

		if (ffs_validate(fs))
			break; /* Super block validated */
	}

	if (sbtry[i] == -1) {
		error = EINVAL;
		goto out;
	}

	fs->fs_fmod = 0;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
#if 0
		/*
		 * It is safe to mount an unclean file system
		 * if it was previously mounted with softdep
		 * but we may lose space and must
		 * sometimes run fsck manually.
		 */
		if (fs->fs_flags & FS_DOSOFTDEP)
			printf(
"WARNING: %s was not properly unmounted\n",
			    fs->fs_fsmnt);
		else
#endif
		if (ronly || (mp->mnt_flag & MNT_FORCE)) {
			printf(
"WARNING: %s was not properly unmounted\n",
			    fs->fs_fsmnt);
		} else {
			printf(
"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
			    fs->fs_fsmnt);
			error = EROFS;
			goto out;
		}
	}

	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
#ifndef SMALL_KERNEL
		printf("ffs_mountfs(): obsolete rotational table format, "
		    "please use fsck_ffs(8) -c 1\n");
#endif
		error = EFTYPE;
		goto out;
	}

	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK|M_ZERO);
	ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT,
	    M_WAITOK);

	if (fs->fs_magic == FS_UFS1_MAGIC)
		ump->um_fstype = UM_UFS1;
#ifdef FFS2
	else
		ump->um_fstype = UM_UFS2;
#endif

	memcpy(ump->um_fs, bp->b_data, fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs = ump->um_fs;

	ffs1_compat_read(fs, ump, sbloc);

	if (fs->fs_clean == 0)
		fs->fs_flags |= FS_UNCLEAN;
	fs->fs_ronly = ronly;
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = (struct csum *)space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, &bp);
		if (error) {
			free(fs->fs_csp, M_UFSMNT, 0);
			goto out;
		}
		memcpy(space, bp->b_data, size);
		space += size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = (int32_t *)space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	/* Use on-disk fsid if it exists, else fake it */
	if (fs->fs_id[0] != 0 && fs->fs_id[1] != 0)
		mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	else
		mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;

	devvp->v_specmountpoint = mp;
	ffs_oldfscompat(fs);

	if (ronly)
		fs->fs_contigdirs = NULL;
	else {
		fs->fs_contigdirs = malloc((u_long)fs->fs_ncg,
		    M_UFSMNT, M_WAITOK|M_ZERO);
	}

	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
	strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, sizeof(fs->fs_fsmnt));

#if 0
	if( mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * this will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}
#endif

	/*
	 * XXX
	 * Limit max file size.  Even though ffs can handle files up to 16TB,
	 * we do limit the max file to 2^31 pages to prevent overflow of
	 * a 32-bit unsigned int.  The buffer cache has its own checks but
	 * a little added paranoia never hurts.
	 */
	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
	maxfilesize = FS_KERNMAXFILESIZE(PAGE_SIZE, fs);
	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
		fs->fs_maxfilesize = maxfilesize;		/* XXX */
	if (ronly == 0) {
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			free(fs->fs_csp, M_UFSMNT, 0);
			free(fs->fs_contigdirs, M_UFSMNT, 0);
			goto out;
		}
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
		if (mp->mnt_flag & MNT_SOFTDEP)
			fs->fs_flags |= FS_DOSOFTDEP;
		else
			fs->fs_flags &= ~FS_DOSOFTDEP;
		error = ffs_sbupdate(ump, MNT_WAIT);
		if (error == EROFS)
			goto out;
	}
	return (0);
out:
	devvp->v_specmountpoint = NULL;
	if (bp)
		brelse(bp);

	vn_lock(devvp, LK_EXCLUSIVE|LK_RETRY, p);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	VOP_UNLOCK(devvp, 0, p);

	if (ump) {
		free(ump->um_fs, M_UFSMNT, ump->um_fs->fs_sbsize);
		free(ump, M_UFSMNT, sizeof(*ump));
		mp->mnt_data = NULL;
	}
	return (error);
}
Example no. 21
0
static int
ext2_indirtrunc(struct inode *ip, daddr_t lbn, daddr_t dbn,
    daddr_t lastbn, int level, e4fs_daddr_t *countp)
{
	struct buf *bp;
	struct m_ext2fs *fs = ip->i_e2fs;
	struct vnode *vp;
	e2fs_daddr_t *bap, *copy;
	int i, nblocks, error = 0, allerror = 0;
	e2fs_lbn_t nb, nlbn, last;
	e4fs_daddr_t blkcount, factor, blocksreleased = 0;

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = btodb(fs->e2fs_bsize);
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update the on-disk copy first.  Since
	 * double (triple) indirect blocks are freed before single (double)
	 * indirect blocks, calls to bmap on these blocks will fail.  However,
	 * we already have the on-disk address, so we have to set the b_blkno
	 * field explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, (int)fs->e2fs_bsize, 0, 0, 0);
	if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
		bp->b_iocmd = BIO_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("ext2_indirtrunc: bad buffer size");
		bp->b_blkno = dbn;
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		error = bufwait(bp);
	}
	if (error) {
		brelse(bp);
		*countp = 0;
		return (error);
	}

	bap = (e2fs_daddr_t *)bp->b_data;
	copy = malloc(fs->e2fs_bsize, M_TEMP, M_WAITOK);
	bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->e2fs_bsize);
	bzero((caddr_t)&bap[last + 1],
	  (NINDIR(fs) - (last + 1)) * sizeof(e2fs_daddr_t));
	if (last == -1)
		bp->b_flags |= B_INVAL;
	if (DOINGASYNC(vp)) {
		bdwrite(bp);
	} else {
		error = bwrite(bp);
		if (error)
			allerror = error;
	}
	bap = copy;

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	    i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			if ((error = ext2_indirtrunc(ip, nlbn,
			    fsbtodb(fs, nb), (int32_t)-1, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
		ext2_blkfree(ip, nb, fs->e2fs_bsize);
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			if ((error = ext2_indirtrunc(ip, nlbn, fsbtodb(fs, nb),
			    last, level - 1, &blkcount)) != 0)
				allerror = error;
			blocksreleased += blkcount;
		}
	}
	free(copy, M_TEMP);
	*countp = blocksreleased;
	return (allerror);
}
Example no. 22
0
static int
find_superblock(caddr_t devstr)
{
	int cg = 0;
	int retval = 0;
	int first;
	int found;
	calcsb_t style;
	struct fs proto;

	/*
	 * Check the superblock, looking for alternates if necessary.
	 * In more-recent times, some UFS instances get created with
	 * only the first ten and last ten superblock backups.  If we
	 * can't get the necessary information from any of those, the
	 * odds are against us for the ones in between as well, so we'll
	 * just look at those twenty to save time.
	 */
	if (!read_super_block(1) || !checksb(1)) {
		if (bflag || preen) {
			retval = -1;
			goto finish;
		}
		for (style = MKFS_STYLE; style < MAX_SB_STYLES; style++) {
			if (reply("LOOK FOR ALTERNATE SUPERBLOCKS WITH %s",
			    calcsb_names[style]) == 0)
				continue;
			first = 1;
			found = 0;
			if (!calcsb(style, devstr, fsreadfd, &proto)) {
				cg = proto.fs_ncg;
				continue;
			}
			if (debug) {
				(void) printf(
			    "debug: calcsb(%s) gave fpg %d, cgoffset %d, ",
				    calcsb_names[style],
				    proto.fs_fpg, proto.fs_cgoffset);
				(void) printf("cgmask 0x%x, sblk %d, ncg %d\n",
				    proto.fs_cgmask, proto.fs_sblkno,
				    proto.fs_ncg);
			}
			for (cg = 0; cg < proto.fs_ncg; cg++) {
				bflag = fsbtodb(&proto, cgsblock(&proto, cg));
				if (debug)
					(void) printf(
					    "debug: trying block %lld\n",
					    (longlong_t)bflag);
				if (read_super_block(0) && checksb(0)) {
					(void) printf(
				    "FOUND ALTERNATE SUPERBLOCK %d WITH %s\n",
					    bflag, calcsb_names[style]);
					if (reply(
					    "USE ALTERNATE SUPERBLOCK") == 1) {
						found = 1;
						break;
					}
				}
				if (first && (cg >= 9)) {
					first = 0;
					if (proto.fs_ncg <= 9)
						cg = proto.fs_ncg;
					else if (proto.fs_ncg <= 19)
						cg = 9;
					else
						cg = proto.fs_ncg - 10;
				}
			}

			if (found)
				break;
		}

		/*
		 * Didn't find one?  Try to fake it.
		 */
		if (style >= MAX_SB_STYLES) {
			pwarn("SEARCH FOR ALTERNATE SUPERBLOCKS FAILED.\n");
			for (style = MKFS_STYLE; style < MAX_SB_STYLES;
			    style++) {
				if (reply("USE GENERIC SUPERBLOCK FROM %s",
				    calcsb_names[style]) == 1 &&
				    calcsb(style, devstr, fsreadfd, &sblock)) {
					break;
			}
			/*
			 * We got something from mkfs/newfs, so use it.
			 */
			if (style < MAX_SB_STYLES)
				proto.fs_ncg = sblock.fs_ncg;
				bflag = 0;
			}
		}

		/*
		 * Still no luck?  Tell the user they're on their own.
		 */
		if (style >= MAX_SB_STYLES) {
			pwarn("SEARCH FOR ALTERNATE SUPERBLOCKS FAILED. "
			    "YOU MUST USE THE -o b OPTION\n"
			    "TO FSCK TO SPECIFY THE LOCATION OF A VALID "
			    "ALTERNATE SUPERBLOCK TO\n"
			    "SUPPLY NEEDED INFORMATION; SEE fsck(1M).\n");
			bflag = 0;
			retval = -1;
			goto finish;
		}

		/*
		 * Need to make sure a human really wants us to use
		 * this.  -y mode could've gotten us this far, so
		 * we need to ask something that has to be answered
		 * in the negative.
		 *
		 * Note that we can't get here when preening.
		 */
		if (!found) {
			pwarn("CALCULATED GENERIC SUPERBLOCK WITH %s\n",
			    calcsb_names[style]);
		} else {
			pwarn("FOUND ALTERNATE SUPERBLOCK AT %d USING %s\n",
			    bflag, calcsb_names[style]);
		}
		pwarn("If filesystem was created with manually-specified ");
		pwarn("geometry, using\nauto-discovered superblock may ");
		pwarn("result in irrecoverable damage to\nfilesystem and ");
		pwarn("user data.\n");
		if (reply("CANCEL FILESYSTEM CHECK") == 1) {
			if (cg >= 0) {
				pwarn("Please verify that the indicated block "
				    "contains a proper\nsuperblock for the "
				    "filesystem (see fsdb(1M)).\n");
				if (yflag)
					pwarn("\nFSCK was running in YES "
					    "mode.  If you wish to run in "
					    "that mode using\nthe alternate "
					    "superblock, run "
					    "`fsck -y -o b=%d %s'.\n",
					    bflag, devstr);
			}
			retval = -1;
			goto finish;
		}

		/*
		 * Pretend we found it as an alternate, so everything
		 * gets updated when we clean up at the end.
		 */
		if (!found) {
			havesb = 1;
			sblk.b_bno = fsbtodb(&sblock, cgsblock(&sblock, 0));
			bwrite(fswritefd, (caddr_t)&sblock, SBLOCK, SBSIZE);
			write_altsb(fswritefd);
		}
	}

finish:
	return (retval);
}
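
The "first && cg >= 9" adjustment above restricts the alternate-superblock search to the first ten and roughly the last ten cylinder groups. The sketch below replays just that loop-control logic for an assumed group count, printing which groups would be probed:

#include <stdio.h>

int
main(void)
{
	int ncg = 64;	/* assumed number of cylinder groups */
	int cg, first = 1;

	for (cg = 0; cg < ncg; cg++) {
		printf("probe cg %d\n", cg);	/* groups 0-9, then 55-63 */
		if (first && cg >= 9) {
			first = 0;
			if (ncg <= 9)
				cg = ncg;
			else if (ncg <= 19)
				cg = 9;
			else
				cg = ncg - 10;	/* skip ahead to the tail */
		}
	}
	return (0);
}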
Example no. 23
0
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *	  inode in the specified cylinder group.
 */
static int32_t
ext2fs_nodealloccg(struct inode *ip, int cg, int32_t ipref, int mode)
{
	struct m_ext2fs *fs;
	char *ibp;
	struct buf *bp;
	int error, start, len, loc, map, i;

	ipref--; /* to avoid a lot of (ipref -1) */
	fs = ip->i_e2fs;
	if (fs->e2fs_gd[cg].ext2bgd_nifree == 0)
		return (0);
	error = bread(ip->i_devvp, fsbtodb(fs,
		fs->e2fs_gd[cg].ext2bgd_i_bitmap),
		(int)fs->e2fs_bsize, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	ibp = (char *)bp->b_data;
	if (ipref) {
		ipref %= fs->e2fs.e2fs_ipg;
		if (isclr(ibp, ipref))
			goto gotit;
	}
	start = ipref / NBBY;
	len = howmany(fs->e2fs.e2fs_ipg - ipref, NBBY);
	loc = skpc(0xff, len, &ibp[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &ibp[0]);
		if (loc == 0) {
			printf("cg = %d, ipref = %d, fs = %s\n",
				cg, ipref, fs->e2fs_fsmnt);
			panic("ext2fs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = ibp[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->e2fs_fsmnt);
	panic("ext2fs_nodealloccg: block not in map");
	/* NOTREACHED */
gotit:
	setbit(ibp, ipref);
	fs->e2fs.e2fs_ficount--;
	fs->e2fs_gd[cg].ext2bgd_nifree--;
	fs->e2fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		fs->e2fs_gd[cg].ext2bgd_ndirs++;
	}
	bdwrite(bp);
	return (cg * fs->e2fs.e2fs_ipg + ipref +1);
}
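
After skpc() locates a map byte with a clear bit, the loop above walks that byte one bit mask at a time while advancing the inode number. A standalone sketch of that bit walk, with a made-up map byte and starting inode offset:

#include <stdio.h>

#define SK_NBBY	8	/* bits per byte */

int
main(void)
{
	unsigned char map = 0x5f;	/* bits 0-4 and 6 set; bit 5 is free */
	int ipref = 3 * SK_NBBY;	/* first inode covered by this byte */
	int i;

	for (i = 1; i < (1 << SK_NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			printf("free inode bit at ipref %d\n", ipref);
			return (0);
		}
	}
	printf("no clear bit in this byte\n");
	return (0);
}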
Example no. 24
0
void
pass5(void)
{
	int c, blk, frags, basesize, sumsize, mapsize, cssize;
	int inomapsize, blkmapsize;
	struct fs *fs = sblock;
	daddr_t dbase, dmax;
	daddr_t d;
	long i, j, k;
	struct csum *cs;
	struct csum_total cstotal;
	struct inodesc idesc[4];
	char buf[MAXBSIZE];
	struct cg *newcg = (struct cg *)buf;
	struct ocg *ocg = (struct ocg *)buf;
	struct cg *cg = cgrp, *ncg;
	struct inostat *info;
	u_int32_t ncgsize;

	inoinfo(WINO)->ino_state = USTATE;
	memset(newcg, 0, (size_t)fs->fs_cgsize);
	newcg->cg_niblk = fs->fs_ipg;
	if (cvtlevel >= 3) {
		if (fs->fs_maxcontig < 2 && fs->fs_contigsumsize > 0) {
			if (preen)
				pwarn("DELETING CLUSTERING MAPS\n");
			if (preen || reply("DELETE CLUSTERING MAPS")) {
				fs->fs_contigsumsize = 0;
				doinglevel1 = 1;
				sbdirty();
			}
		}
		if (fs->fs_maxcontig > 1) {
			const char *doit = NULL;

			if (fs->fs_contigsumsize < 1) {
				doit = "CREAT";
			} else if (fs->fs_contigsumsize < fs->fs_maxcontig &&
				   fs->fs_contigsumsize < FS_MAXCONTIG) {
				doit = "EXPAND";
			}
			if (doit) {
				i = fs->fs_contigsumsize;
				fs->fs_contigsumsize =
				    MIN(fs->fs_maxcontig, FS_MAXCONTIG);
				if (CGSIZE(fs) > fs->fs_bsize) {
					pwarn("CANNOT %s CLUSTER MAPS\n", doit);
					fs->fs_contigsumsize = i;
				} else if (preen ||
				    reply("CREATE CLUSTER MAPS")) {
					if (preen)
						pwarn("%sING CLUSTER MAPS\n",
						    doit);
					ncgsize = fragroundup(fs, CGSIZE(fs));
					ncg = realloc(cgrp, ncgsize);
					if (ncg == NULL)
						errexit(
						"cannot reallocate cg space");
					cg = cgrp = ncg;
					fs->fs_cgsize = ncgsize;
					doinglevel1 = 1;
					sbdirty();
				}
			}
		}
	}
	basesize = &newcg->cg_space[0] - (u_char *)(&newcg->cg_firstfield);
	cssize = (u_char *)&cstotal.cs_spare[0] - (u_char *)&cstotal.cs_ndir;
	sumsize = 0;
	if (is_ufs2) {
		newcg->cg_iusedoff = basesize;
	} else {
		/*
		 * We reserve the space for the old rotation summary
		 * tables for the benefit of old kernels, but do not
		 * maintain them in modern kernels. In time, they can
		 * go away.
		 */
		newcg->cg_old_btotoff = basesize;
		newcg->cg_old_boff = newcg->cg_old_btotoff +
		    fs->fs_old_cpg * sizeof(int32_t);
		newcg->cg_iusedoff = newcg->cg_old_boff +
		    fs->fs_old_cpg * fs->fs_old_nrpos * sizeof(u_int16_t);
		memset(&newcg->cg_space[0], 0, newcg->cg_iusedoff - basesize);
	}
	inomapsize = howmany(fs->fs_ipg, CHAR_BIT);
	newcg->cg_freeoff = newcg->cg_iusedoff + inomapsize;
	blkmapsize = howmany(fs->fs_fpg, CHAR_BIT);
	newcg->cg_nextfreeoff = newcg->cg_freeoff + blkmapsize;
	if (fs->fs_contigsumsize > 0) {
		newcg->cg_clustersumoff = newcg->cg_nextfreeoff -
		    sizeof(u_int32_t);
		if (isappleufs) {
			/* Apple PR2216969 gives rationale for this change.
			 * I believe they were mistaken, but we need to
			 * duplicate it for compatibility.  -- [email protected]
			 */
			newcg->cg_clustersumoff += sizeof(u_int32_t);
		}
		newcg->cg_clustersumoff =
		    roundup(newcg->cg_clustersumoff, sizeof(u_int32_t));
		newcg->cg_clusteroff = newcg->cg_clustersumoff +
		    (fs->fs_contigsumsize + 1) * sizeof(u_int32_t);
		newcg->cg_nextfreeoff = newcg->cg_clusteroff +
		    howmany(fragstoblks(fs, fs->fs_fpg), CHAR_BIT);
	}
	newcg->cg_magic = CG_MAGIC;
	mapsize = newcg->cg_nextfreeoff - newcg->cg_iusedoff;
	if (!is_ufs2 && ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0)) {
		switch ((int)fs->fs_old_postblformat) {

		case FS_42POSTBLFMT:
			basesize = (char *)(&ocg->cg_btot[0]) -
			    (char *)(&ocg->cg_firstfield);
			sumsize = &ocg->cg_iused[0] - (u_int8_t *)(&ocg->cg_btot[0]);
			mapsize = &ocg->cg_free[howmany(fs->fs_fpg, NBBY)] -
			    (u_char *)&ocg->cg_iused[0];
			blkmapsize = howmany(fs->fs_fpg, NBBY);
			inomapsize = &ocg->cg_free[0] - (u_char *)&ocg->cg_iused[0];
			ocg->cg_magic = CG_MAGIC;
			newcg->cg_magic = 0;
			break;

		case FS_DYNAMICPOSTBLFMT:
			sumsize = newcg->cg_iusedoff - newcg->cg_old_btotoff;
			break;

		default:
			errexit("UNKNOWN ROTATIONAL TABLE FORMAT %d",
			    fs->fs_old_postblformat);
		}
	}
	memset(&idesc[0], 0, sizeof idesc);
	for (i = 0; i < 4; i++) {
		idesc[i].id_type = ADDR;
		if (!is_ufs2 && doinglevel2)
			idesc[i].id_fix = FIX;
	}
	memset(&cstotal, 0, sizeof(struct csum_total));
	dmax = blknum(fs, fs->fs_size + fs->fs_frag - 1);
	for (d = fs->fs_size; d < dmax; d++)
		setbmap(d);
	for (c = 0; c < fs->fs_ncg; c++) {
		if (got_siginfo) {
			fprintf(stderr,
			    "%s: phase 5: cyl group %d of %d (%d%%)\n",
			    cdevname(), c, fs->fs_ncg,
			    c * 100 / fs->fs_ncg);
			got_siginfo = 0;
		}
#ifdef PROGRESS
		progress_bar(cdevname(), preen ? NULL : "phase 5",
			    c, fs->fs_ncg);
#endif /* PROGRESS */
		getblk(&cgblk, cgtod(fs, c), fs->fs_cgsize);
		memcpy(cg, cgblk.b_un.b_cg, fs->fs_cgsize);
		if ((doswap && !needswap) || (!doswap && needswap))
			ffs_cg_swap(cgblk.b_un.b_cg, cg, sblock);
		if (!doinglevel1 && !cg_chkmagic(cg, 0))
			pfatal("CG %d: PASS5: BAD MAGIC NUMBER\n", c);
		if (doswap)
			cgdirty();
		/*
		 * While we have the disk head where we want it,
		 * write back the superblock to the spare at this
		 * cylinder group.
		 */
		if ((cvtlevel && sblk.b_dirty) || doswap) {
			bwrite(fswritefd, sblk.b_un.b_buf,
			    fsbtodb(sblock, cgsblock(sblock, c)),
			    sblock->fs_sbsize);
		} else {
			/*
			 * Read in the current alternate superblock,
			 * and compare it to the master.  If it's
			 * wrong, fix it up.
			 */
			getblk(&asblk, cgsblock(sblock, c), sblock->fs_sbsize);
			if (asblk.b_errs)
				pfatal("CG %d: UNABLE TO READ ALTERNATE "
				    "SUPERBLK\n", c);
			else {
				memmove(altsblock, asblk.b_un.b_fs,
				    sblock->fs_sbsize);
				if (needswap)
					ffs_sb_swap(asblk.b_un.b_fs, altsblock);
			}
			sb_oldfscompat_write(sblock, sblocksave);
			if ((asblk.b_errs || cmpsblks(sblock, altsblock)) &&
			     dofix(&idesc[3],
				   "ALTERNATE SUPERBLK(S) ARE INCORRECT")) {
				bwrite(fswritefd, sblk.b_un.b_buf,
				    fsbtodb(sblock, cgsblock(sblock, c)),
				    sblock->fs_sbsize);
			}
			sb_oldfscompat_read(sblock, 0);
		}
		dbase = cgbase(fs, c);
		dmax = dbase + fs->fs_fpg;
		if (dmax > fs->fs_size)
			dmax = fs->fs_size;
		if (is_ufs2 || (fs->fs_old_flags & FS_FLAGS_UPDATED))
			newcg->cg_time = cg->cg_time;
		newcg->cg_old_time = cg->cg_old_time;
		newcg->cg_cgx = c;
		newcg->cg_ndblk = dmax - dbase;
		if (!is_ufs2) {
			if (c == fs->fs_ncg - 1) {
				/*
				 * Avoid fighting old fsck for this value.
				 * It's never used outside of this check anyway.
				 */
				if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0)
					newcg->cg_old_ncyl = fs->fs_old_ncyl % fs->fs_old_cpg;
				else
					newcg->cg_old_ncyl = howmany(newcg->cg_ndblk,
					    fs->fs_fpg / fs->fs_old_cpg);
			} else
				newcg->cg_old_ncyl = fs->fs_old_cpg;
			newcg->cg_old_niblk = fs->fs_ipg;
			newcg->cg_niblk = 0;
		}
		if (fs->fs_contigsumsize > 0)
			newcg->cg_nclusterblks = newcg->cg_ndblk / fs->fs_frag;
		newcg->cg_cs.cs_ndir = 0;
		newcg->cg_cs.cs_nffree = 0;
		newcg->cg_cs.cs_nbfree = 0;
		newcg->cg_cs.cs_nifree = fs->fs_ipg;
		if (cg->cg_rotor >= 0 && cg->cg_rotor < newcg->cg_ndblk)
			newcg->cg_rotor = cg->cg_rotor;
		else
			newcg->cg_rotor = 0;
		if (cg->cg_frotor >= 0 && cg->cg_frotor < newcg->cg_ndblk)
			newcg->cg_frotor = cg->cg_frotor;
		else
			newcg->cg_frotor = 0;
		if (cg->cg_irotor >= 0 && cg->cg_irotor < fs->fs_ipg)
			newcg->cg_irotor = cg->cg_irotor;
		else
			newcg->cg_irotor = 0;
		if (!is_ufs2) {
			newcg->cg_initediblk = 0;
		} else {
			if ((unsigned)cg->cg_initediblk > fs->fs_ipg)
				newcg->cg_initediblk = fs->fs_ipg;
			else
				newcg->cg_initediblk = cg->cg_initediblk;
		}
		memset(&newcg->cg_frsum[0], 0, sizeof newcg->cg_frsum);
		memset(&old_cg_blktot(newcg, 0)[0], 0, (size_t)(sumsize));
		memset(cg_inosused(newcg, 0), 0, (size_t)(mapsize));
		if (!is_ufs2 && ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) &&
		    fs->fs_old_postblformat == FS_42POSTBLFMT)
			ocg->cg_magic = CG_MAGIC;
		j = fs->fs_ipg * c;
		for (i = 0; i < fs->fs_ipg; j++, i++) {
			info = inoinfo(j);
			switch (info->ino_state) {

			case USTATE:
				break;

			case DSTATE:
			case DCLEAR:
			case DFOUND:
				newcg->cg_cs.cs_ndir++;
				/* fall through */

			case FSTATE:
			case FCLEAR:
				newcg->cg_cs.cs_nifree--;
				setbit(cg_inosused(newcg, 0), i);
				break;

			default:
				if (j < ROOTINO)
					break;
				errexit("BAD STATE %d FOR INODE I=%ld",
				    info->ino_state, (long)j);
			}
		}
		if (c == 0)
			for (i = 0; i < ROOTINO; i++) {
				setbit(cg_inosused(newcg, 0), i);
				newcg->cg_cs.cs_nifree--;
			}
		for (i = 0, d = dbase;
		     d < dmax;
		     d += fs->fs_frag, i += fs->fs_frag) {
			frags = 0;
			for (j = 0; j < fs->fs_frag; j++) {
				if (testbmap(d + j))
					continue;
				setbit(cg_blksfree(newcg, 0), i + j);
				frags++;
			}
			if (frags == fs->fs_frag) {
				newcg->cg_cs.cs_nbfree++;
				if (sumsize) {
					j = old_cbtocylno(fs, i);
					old_cg_blktot(newcg, 0)[j]++;
					old_cg_blks(fs, newcg, j, 0)[old_cbtorpos(fs, i)]++;
				}
				if (fs->fs_contigsumsize > 0)
					setbit(cg_clustersfree(newcg, 0),
					    fragstoblks(fs, i));
			} else if (frags > 0) {
				newcg->cg_cs.cs_nffree += frags;
				blk = blkmap(fs, cg_blksfree(newcg, 0), i);
				ffs_fragacct(fs, blk, newcg->cg_frsum, 1, 0);
			}
		}
		if (fs->fs_contigsumsize > 0) {
			int32_t *sump = cg_clustersum(newcg, 0);
			u_char *mapp = cg_clustersfree(newcg, 0);
			int map = *mapp++;
			int bit = 1;
			int run = 0;

			for (i = 0; i < newcg->cg_nclusterblks; i++) {
				if ((map & bit) != 0) {
					run++;
				} else if (run != 0) {
					if (run > fs->fs_contigsumsize)
						run = fs->fs_contigsumsize;
					sump[run]++;
					run = 0;
				}
				if ((i & (NBBY - 1)) != (NBBY - 1)) {
					bit <<= 1;
				} else {
					map = *mapp++;
					bit = 1;
				}
			}
			if (run != 0) {
				if (run > fs->fs_contigsumsize)
					run = fs->fs_contigsumsize;
				sump[run]++;
			}
		}
		cstotal.cs_nffree += newcg->cg_cs.cs_nffree;
		cstotal.cs_nbfree += newcg->cg_cs.cs_nbfree;
		cstotal.cs_nifree += newcg->cg_cs.cs_nifree;
		cstotal.cs_ndir += newcg->cg_cs.cs_ndir;
		cs = &fs->fs_cs(fs, c);
		if (memcmp(&newcg->cg_cs, cs, sizeof *cs) != 0) {
			if (debug) {
				printf("cg %d: nffree: %d/%d nbfree %d/%d"
					" nifree %d/%d ndir %d/%d\n",
					c, cs->cs_nffree,newcg->cg_cs.cs_nffree,
					cs->cs_nbfree,newcg->cg_cs.cs_nbfree,
					cs->cs_nifree,newcg->cg_cs.cs_nifree,
					cs->cs_ndir,newcg->cg_cs.cs_ndir);
			}
			if (dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) {
				memmove(cs, &newcg->cg_cs, sizeof *cs);
				sbdirty();
			} else
				markclean = 0;
		}
		if (doinglevel1) {
			memmove(cg, newcg, (size_t)fs->fs_cgsize);
			cgdirty();
			continue;
		}
		if ((memcmp(newcg, cg, basesize) != 0) ||
		    (memcmp(&old_cg_blktot(newcg, 0)[0],
		        &old_cg_blktot(cg, 0)[0], sumsize) != 0)) {
		 	if (dofix(&idesc[2], "SUMMARY INFORMATION BAD")) {
				memmove(cg, newcg, (size_t)basesize);
				memmove(&old_cg_blktot(cg, 0)[0],
			       &old_cg_blktot(newcg, 0)[0], (size_t)sumsize);
				cgdirty();
			} else
				markclean = 0;
		}
		if (usedsoftdep) {
			for (i = 0; i < inomapsize; i++) {
				j = cg_inosused(newcg, 0)[i];
				if ((cg_inosused(cg, 0)[i] & j) == j)
					continue;
				for (k = 0; k < NBBY; k++) {
					if ((j & (1 << k)) == 0)
						continue;
					if (cg_inosused(cg, 0)[i] & (1 << k))
						continue;
					pwarn("ALLOCATED INODE %ld "
					    "MARKED FREE\n",
					    c * fs->fs_ipg + i * 8 + k);
				}
			}
			for (i = 0; i < blkmapsize; i++) {
				j = cg_blksfree(cg, 0)[i];
				if ((cg_blksfree(newcg, 0)[i] & j) == j)
					continue;
				for (k = 0; k < NBBY; k++) {
					if ((j & (1 << k)) == 0)
						continue;
					if (cg_blksfree(newcg, 0)[i] & (1 << k))
						continue;
					pwarn("ALLOCATED FRAG %ld "
					    "MARKED FREE\n",
					    c * fs->fs_fpg + i * 8 + k);
				}
			}
		}
		if (memcmp(cg_inosused(newcg, 0), cg_inosused(cg, 0), mapsize)
		    != 0 && dofix(&idesc[1], "BLK(S) MISSING IN BIT MAPS")) {
			memmove(cg_inosused(cg, 0), cg_inosused(newcg, 0),
			    (size_t)mapsize);
			cgdirty();
		}
	}
	if (memcmp(&cstotal, &fs->fs_cstotal, cssize) != 0) {
		if (debug) {
			printf("total: nffree: %lld/%lld nbfree %lld/%lld"
				" nifree %lld/%lld ndir %lld/%lld\n",
				(long long int)fs->fs_cstotal.cs_nffree,
				(long long int)cstotal.cs_nffree,
				(long long int)fs->fs_cstotal.cs_nbfree,
				(long long int)cstotal.cs_nbfree,
				(long long int)fs->fs_cstotal.cs_nifree,
				(long long int)cstotal.cs_nifree,
				(long long int)fs->fs_cstotal.cs_ndir,
				(long long int)cstotal.cs_ndir);
		}
		if (dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) {
			memmove(&fs->fs_cstotal, &cstotal, sizeof cstotal);
			fs->fs_ronly = 0;
			fs->fs_fmod = 0;
			sbdirty();
		} else
			markclean = 0;
	}
#ifdef PROGRESS
	if (!preen)
		progress_done();
#endif /* PROGRESS */
}
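The cluster accounting loop near the end of the pass above turns the free-cluster bitmap into a run-length histogram: every maximal run of free clusters bumps the summary counter for that run length, clamped at fs_contigsumsize. The following standalone sketch reproduces just that bookkeeping with illustrative data; the bitmap contents and the cluster_summary name are made up for the example.

#include <stdio.h>
#include <string.h>

#define NBBY	8

static void
cluster_summary(const unsigned char *mapp, int nclusterblks,
    int contigsumsize, int *sump)
{
	int map = *mapp++;
	int bit = 1;
	int run = 0;
	int i;

	for (i = 0; i < nclusterblks; i++) {
		if ((map & bit) != 0) {
			run++;
		} else if (run != 0) {
			/* a run of free clusters just ended */
			if (run > contigsumsize)
				run = contigsumsize;
			sump[run]++;
			run = 0;
		}
		if ((i & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (run != 0) {
		if (run > contigsumsize)
			run = contigsumsize;
		sump[run]++;
	}
}

int
main(void)
{
	/* 16 clusters: free runs of length 3, 2 and 5 (LSB first per byte) */
	unsigned char bitmap[2] = { 0x67, 0x3e };
	int sum[8 + 1];
	int i;

	memset(sum, 0, sizeof(sum));
	cluster_summary(bitmap, 16, 8, sum);
	for (i = 1; i <= 8; i++)
		if (sum[i] != 0)
			printf("runs of length %d: %d\n", i, sum[i]);
	return 0;
}

Compiled on its own, the sketch reports one run each of length 2, 3 and 5 for the sample bitmap.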
Example no. 25
0
void
check(char *file)
{
	int i, j, c;

	fi = open64(file, 0);
	if (fi < 0) {
		(void) fprintf(stderr, "ncheck: cannot open %s\n", file);
		nerror++;
		return;
	}
	nhent = 0;
	(void) printf("%s:\n", file);
	sync();
	bread((diskaddr_t)SBLOCK, (char *)&sblock, SBSIZE);
	if ((sblock.fs_magic != FS_MAGIC) &&
	    (sblock.fs_magic != MTB_UFS_MAGIC)) {
		(void) printf("%s: not a ufs file system\n", file);
		nerror++;
		return;
	}

	if ((sblock.fs_magic == FS_MAGIC) &&
	    ((sblock.fs_version != UFS_EFISTYLE4NONEFI_VERSION_2) &&
	    (sblock.fs_version != UFS_VERSION_MIN))) {
		(void) printf("%s: unrecognized ufs version number %d\n",
		    file, sblock.fs_version);
		nerror++;
		return;
	}

	if ((sblock.fs_magic == MTB_UFS_MAGIC) &&
	    ((sblock.fs_version > MTB_UFS_VERSION_1) ||
	    (sblock.fs_version < MTB_UFS_VERSION_MIN))) {
		(void) printf("%s: unrecognized ufs version number %d\n",
		    file, sblock.fs_version);
		nerror++;
		return;
	}

	/* If fs is logged, roll the log. */
	if (sblock.fs_logbno) {
		switch (rl_roll_log(file)) {
		case RL_SUCCESS:
			/*
			 * Reread the superblock.  Rolling the log may have
			 * changed it.
			 */
			bread((diskaddr_t)SBLOCK, (char *)&sblock, SBSIZE);
			break;
		case RL_SYSERR:
			(void) printf("Warning: cannot roll log for %s.  %s\n",
				file, strerror(errno));
			break;
		default:
			(void) printf("Warning: cannot roll log for %s.\n",
				file);
			break;
		}
	}

	itab = (struct dinode *)extend_tbl((uchar_t *)itab, &itab_size,
		(unsigned)(sblock.fs_ipg * sizeof (struct dinode)));
	if (itab == 0) {
		(void) fprintf(stderr,
			"ncheck: not enough memory for itab table\n");
		nerror++;
		return;
	}

	hsize = sblock.fs_ipg * sblock.fs_ncg - sblock.fs_cstotal.cs_nifree + 1;

	htab = (struct htab *)extend_tbl((uchar_t *)htab, &htab_size,
		(unsigned)(hsize * sizeof (struct htab)));
	if (htab == 0) {
		(void) fprintf(stderr,
			"ncheck: not enough memory for htab table\n");
		nerror++;
		return;
	}

	if (!extend_strngtab(AVG_PATH_LEN * hsize)) {
		(void) printf("not enough memory to allocate tables\n");
		nerror++;
		return;
	}
	strngloc = 0;

	ino = 0;
	for (c = 0; c < sblock.fs_ncg; c++) {
		bread(fsbtodb(&sblock, cgimin(&sblock, c)), (char *)itab,
		    (int)(sblock.fs_ipg * sizeof (struct dinode)));
		for (j = 0; j < sblock.fs_ipg; j++) {
			if (itab[j].di_smode != 0) {
				itab[j].di_mode = itab[j].di_smode;
				if (itab[j].di_suid != UID_LONG)
					itab[j].di_uid = itab[j].di_suid;
				if (itab[j].di_sgid != GID_LONG)
					itab[j].di_gid = itab[j].di_sgid;
				pass1(&itab[j]);
			}
			ino++;
		}
	}
	ilist[ilist_index++].ino = 0;
	if (ilist_index > MAX_ILIST_INDEX())
		extend_ilist();
	ino = 0;
	for (c = 0; c < sblock.fs_ncg; c++) {
		bread(fsbtodb(&sblock, cgimin(&sblock, c)), (char *)itab,
		    (int)(sblock.fs_ipg * sizeof (struct dinode)));
		for (j = 0; j < sblock.fs_ipg; j++) {

			if (itab[j].di_smode != 0) {
				itab[j].di_mode = itab[j].di_smode;
				pass2(&itab[j]);
			}
			ino++;
		}
	}
	ino = 0;
	for (c = 0; c < sblock.fs_ncg; c++) {
		bread(fsbtodb(&sblock, cgimin(&sblock, c)), (char *)itab,
		    (int)(sblock.fs_ipg * sizeof (struct dinode)));
		for (j = 0; j < sblock.fs_ipg; j++) {
			if (itab[j].di_smode != 0) {
				itab[j].di_mode = itab[j].di_smode;
				pass3(&itab[j]);
			}
			ino++;
		}
	}
	(void) close(fi);

	/*
	 * Clear those elements after inodes specified by "-i" out of
	 * ilist.
	 */
	for (i = iflg; i < ilist_index; i++) {
		ilist[i].ino = 0;
	}
	ilist_index = iflg;
}
Example no. 26
0
int
fsirand(char *device)
{
    struct ufs1_dinode *dp1;
    struct ufs2_dinode *dp2;
    caddr_t inodebuf;
    ssize_t ibufsize;
    struct fs *sblock;
    ino_t inumber;
    ufs2_daddr_t sblockloc, dblk;
    char sbuf[SBLOCKSIZE], sbuftmp[SBLOCKSIZE];
    int i, devfd, n, cg;
    u_int32_t bsize = DEV_BSIZE;
    struct disklabel label;

    if ((devfd = open(device, printonly ? O_RDONLY : O_RDWR)) < 0) {
        warn("can't open %s", device);
        return (1);
    }

    /* Get block size (usually 512) from disklabel if possible */
    if (!ignorelabel) {
        if (ioctl(devfd, DIOCGDINFO, &label) < 0)
            warn("can't read disklabel, using sector size of %d",
                 bsize);
        else
            bsize = label.d_secsize;
    }

    dp1 = NULL;
    dp2 = NULL;

    /* Read in master superblock */
    (void)memset(&sbuf, 0, sizeof(sbuf));
    sblock = (struct fs *)&sbuf;
    for (i = 0; sblock_try[i] != -1; i++) {
        sblockloc = sblock_try[i];
        if (lseek(devfd, sblockloc, SEEK_SET) == -1) {
            warn("can't seek to superblock (%jd) on %s",
                 (intmax_t)sblockloc, device);
            return (1);
        }
        if ((n = read(devfd, (void *)sblock, SBLOCKSIZE))!=SBLOCKSIZE) {
            warnx("can't read superblock on %s: %s", device,
                  (n < SBLOCKSIZE) ? "short read" : strerror(errno));
            return (1);
        }
        if ((sblock->fs_magic == FS_UFS1_MAGIC ||
                (sblock->fs_magic == FS_UFS2_MAGIC &&
                 sblock->fs_sblockloc == sblock_try[i])) &&
                sblock->fs_bsize <= MAXBSIZE &&
                sblock->fs_bsize >= (ssize_t)sizeof(struct fs))
            break;
    }
    if (sblock_try[i] == -1) {
        fprintf(stderr, "Cannot find file system superblock\n");
        return (1);
    }

    if (sblock->fs_magic == FS_UFS1_MAGIC &&
            sblock->fs_old_inodefmt < FS_44INODEFMT) {
        warnx("file system format is too old, sorry");
        return (1);
    }
    if (!force && !printonly && sblock->fs_clean != 1) {
        warnx("file system is not clean, fsck %s first", device);
        return (1);
    }

    /* Make sure backup superblocks are sane. */
    sblock = (struct fs *)&sbuftmp;
    for (cg = 0; cg < (int)sblock->fs_ncg; cg++) {
        dblk = fsbtodb(sblock, cgsblock(sblock, cg));
        if (lseek(devfd, (off_t)dblk * bsize, SEEK_SET) < 0) {
            warn("can't seek to %jd", (intmax_t)dblk * bsize);
            return (1);
        } else if ((n = read(devfd, (void *)sblock, SBLOCKSIZE)) != SBLOCKSIZE) {
            warn("can't read backup superblock %d on %s: %s",
                 cg + 1, device, (n < SBLOCKSIZE) ? "short read"
                 : strerror(errno));
            return (1);
        }
        if (sblock->fs_magic != FS_UFS1_MAGIC &&
                sblock->fs_magic != FS_UFS2_MAGIC) {
            warnx("bad magic number in backup superblock %d on %s",
                  cg + 1, device);
            return (1);
        }
        if (sblock->fs_sbsize > SBLOCKSIZE) {
            warnx("size of backup superblock %d on %s is preposterous",
                  cg + 1, device);
            return (1);
        }
    }
    sblock = (struct fs *)&sbuf;

    /* XXX - should really cap buffer at 512kb or so */
    if (sblock->fs_magic == FS_UFS1_MAGIC)
        ibufsize = sizeof(struct ufs1_dinode) * sblock->fs_ipg;
    else
        ibufsize = sizeof(struct ufs2_dinode) * sblock->fs_ipg;
    if ((inodebuf = malloc(ibufsize)) == NULL)
        errx(1, "can't allocate memory for inode buffer");

    if (printonly && (sblock->fs_id[0] || sblock->fs_id[1])) {
        if (sblock->fs_id[0])
            (void)printf("%s was randomized on %s", device,
                         ctime((void *)&(sblock->fs_id[0])));
        (void)printf("fsid: %x %x\n", sblock->fs_id[0],
                     sblock->fs_id[1]);
    }

    /* Randomize fs_id unless old 4.2BSD file system */
    if (!printonly) {
        /* Randomize fs_id and write out new sblock and backups */
        sblock->fs_id[0] = (u_int32_t)time(NULL);
        sblock->fs_id[1] = random();

        if (lseek(devfd, sblockloc, SEEK_SET) == -1) {
            warn("can't seek to superblock (%jd) on %s",
                 (intmax_t)sblockloc, device);
            return (1);
        }
        if ((n = write(devfd, (void *)sblock, SBLOCKSIZE)) !=
                SBLOCKSIZE) {
            warn("can't write superblock on %s: %s", device,
                 (n < SBLOCKSIZE) ? "short write" : strerror(errno));
            return (1);
        }
    }

    /* For each cylinder group, randomize inodes and update backup sblock */
    for (cg = 0, inumber = 0; cg < (int)sblock->fs_ncg; cg++) {
        /* Update superblock if appropriate */
        if (!printonly) {
            dblk = fsbtodb(sblock, cgsblock(sblock, cg));
            if (lseek(devfd, (off_t)dblk * bsize, SEEK_SET) < 0) {
                warn("can't seek to %jd",
                     (intmax_t)dblk * bsize);
                return (1);
            } else if ((n = write(devfd, (void *)sblock,
                                  SBLOCKSIZE)) != SBLOCKSIZE) {
                warn("can't write backup superblock %d on %s: %s",
                     cg + 1, device, (n < SBLOCKSIZE) ?
                     "short write" : strerror(errno));
                return (1);
            }
        }

        /* Read in inodes, then print or randomize generation nums */
        dblk = fsbtodb(sblock, ino_to_fsba(sblock, inumber));
        if (lseek(devfd, (off_t)dblk * bsize, SEEK_SET) < 0) {
            warn("can't seek to %jd", (intmax_t)dblk * bsize);
            return (1);
        } else if ((n = read(devfd, inodebuf, ibufsize)) != ibufsize) {
            warnx("can't read inodes: %s",
                  (n < ibufsize) ? "short read" : strerror(errno));
            return (1);
        }

        for (n = 0; n < (int)sblock->fs_ipg; n++, inumber++) {
            if (sblock->fs_magic == FS_UFS1_MAGIC)
                dp1 = &((struct ufs1_dinode *)inodebuf)[n];
            else
                dp2 = &((struct ufs2_dinode *)inodebuf)[n];
            if (inumber >= ROOTINO) {
                if (printonly)
                    (void)printf("ino %ju gen %08x\n",
                                 (uintmax_t)inumber,
                                 sblock->fs_magic == FS_UFS1_MAGIC ?
                                 dp1->di_gen : dp2->di_gen);
                else if (sblock->fs_magic == FS_UFS1_MAGIC)
                    dp1->di_gen = random();
                else
                    dp2->di_gen = random();
            }
        }

        /* Write out modified inodes */
        if (!printonly) {
            if (lseek(devfd, (off_t)dblk * bsize, SEEK_SET) < 0) {
                warn("can't seek to %jd",
                     (intmax_t)dblk * bsize);
                return (1);
            } else if ((n = write(devfd, inodebuf, ibufsize)) !=
                       ibufsize) {
                warnx("can't write inodes: %s",
                      (n != ibufsize) ? "short write" :
                      strerror(errno));
                return (1);
            }
        }
    }
    (void)close(devfd);

    return(0);
}
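Every lseek() in the example above uses the same arithmetic: cgsblock() (or ino_to_fsba()) yields a fragment number, fsbtodb() shifts it into device-sector units via fs_fsbtodb, and the byte offset is that sector number times the sector size read from the disklabel. Below is a minimal sketch of that conversion with made-up numbers (4 kB fragments on 512-byte sectors, so the fsbtodb shift is 3).

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t bsize = 512;		/* sector size from the disklabel */
	int fsbtodb_shift = 3;		/* log2(4096 / 512): frags -> sectors */
	int64_t cg_sblock_frag = 40;	/* illustrative frag no. of a backup sb */
	int64_t dblk, byteoff;

	dblk = cg_sblock_frag << fsbtodb_shift;	/* what fsbtodb() computes */
	byteoff = dblk * (int64_t)bsize;	/* offset handed to lseek() */
	printf("frag %lld -> device block %lld -> byte offset %lld\n",
	    (long long)cg_sblock_frag, (long long)dblk, (long long)byteoff);
	return 0;
}

With those numbers the backup superblock at fragment 40 lands at byte offset 163840.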
Example no. 27
0
/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) invalidate all cluster summary information.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 * XXX we are missing some steps, in particular # 3, this has to be reviewed.
 */
static int
ext2_reload(struct mount *mp, struct thread *td)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	struct buf *bp;
	struct ext2fs *es;
	struct m_ext2fs *fs;
	struct csum *sump;
	int error, i;
	int32_t *lp;

	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOEXT2(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if (vinvalbuf(devvp, 0, 0, 0) != 0)
		panic("ext2_reload: dirty1");
	VOP_UNLOCK(devvp, 0);

	/*
	 * Step 2: re-read superblock from disk.
	 * constants have been adjusted for ext2
	 */
	if ((error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp)) != 0)
		return (error);
	es = (struct ext2fs *)bp->b_data;
	if (ext2_check_sb_compat(es, devvp->v_rdev, 0) != 0) {
		brelse(bp);
		return (EIO);		/* XXX needs translation */
	}
	fs = VFSTOEXT2(mp)->um_e2fs;
	bcopy(bp->b_data, fs->e2fs, sizeof(struct ext2fs));

	if((error = compute_sb_data(devvp, es, fs)) != 0) {
		brelse(bp);
		return (error);
	}
#ifdef UNKLAR
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
#endif
	brelse(bp);

	/*
	 * Step 3: invalidate all cluster summary information.
	 */
	if (fs->e2fs_contigsumsize > 0) {
		lp = fs->e2fs_maxcluster;
		sump = fs->e2fs_clustersum;
		for (i = 0; i < fs->e2fs_gcount; i++, sump++) {
			*lp++ = fs->e2fs_contigsumsize;
			sump->cs_init = 0;
			bzero(sump->cs_sum, fs->e2fs_contigsumsize + 1);
		}
	}

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Step 4: invalidate all cached file data.
		 */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, 0, 0))
			panic("ext2_reload: dirty2");

		/*
		 * Step 5: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->e2fs_bsize, NOCRED, &bp);
		if (error) {
			VOP_UNLOCK(vp, 0);
			vrele(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		ext2_ei2i((struct ext2fs_dinode *) ((char *)bp->b_data +
		    EXT2_INODE_SIZE(fs) * ino_to_fsbo(fs, ip->i_number)), ip);
		brelse(bp);
		VOP_UNLOCK(vp, 0);
		vrele(vp);
	}
	return (0);
}
Example no. 28
0
/*
 * If the superblock doesn't already have a recorded journal location
 * then we allocate the journal in one of two positions:
 *
 *  - At the end of the partition after the filesystem if there's
 *    enough space.  "Enough space" is defined as >= 1MB of journal
 *    per 1GB of filesystem or 64MB, whichever is smaller.
 *
 *  - Inside the filesystem.  We try to allocate a contiguous journal
 *    based on the total filesystem size - the target is 1MB of journal
 *    per 1GB of filesystem, up to a maximum journal size of 64MB.  As
 *    a worst case allowing for fragmentation, we'll allocate a journal
 *    1/4 of the desired size but never smaller than 1MB.
 *
 *    XXX In the future if we allow for non-contiguous journal files we
 *    can tighten the above restrictions.
 *
 * XXX
 * This seems like a lot of duplication both here and in some of
 * the userland tools (fsck_ffs, dumpfs, tunefs) with similar
 * "switch (fs_journal_location)" constructs.  Can we centralise
 * this sort of code somehow/somewhere?
 */
int
wapbl_log_position(struct mount *mp, struct fs *fs, struct vnode *devvp,
    daddr_t *startp, size_t *countp, size_t *blksizep, uint64_t *extradatap)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	daddr_t logstart, logend, desired_logsize;
	uint64_t numsecs;
	unsigned secsize;
	int error, location;

	if (fs->fs_journal_version == UFS_WAPBL_VERSION) {
		switch (fs->fs_journal_location) {
		case UFS_WAPBL_JOURNALLOC_END_PARTITION:
			DPRINTF("found existing end-of-partition log\n");
			*startp = fs->fs_journallocs[UFS_WAPBL_EPART_ADDR];
			*countp = fs->fs_journallocs[UFS_WAPBL_EPART_COUNT];
			*blksizep = fs->fs_journallocs[UFS_WAPBL_EPART_BLKSZ];
			DPRINTF(" start = %lld, size = %zu, "
			    "blksize = %zu\n", *startp, *countp, *blksizep);
			return 0;

		case UFS_WAPBL_JOURNALLOC_IN_FILESYSTEM:
			DPRINTF("found existing in-filesystem log\n");
			*startp = fs->fs_journallocs[UFS_WAPBL_INFS_ADDR];
			*countp = fs->fs_journallocs[UFS_WAPBL_INFS_COUNT];
			*blksizep = fs->fs_journallocs[UFS_WAPBL_INFS_BLKSZ];
			DPRINTF(" start = %lld, size = %zu, "
			    "blksize = %zu\n", *startp, *countp, *blksizep);
			return 0;

		default:
			printf("ffs_wapbl: unknown journal type %d\n",
			    fs->fs_journal_location);
			return EINVAL;
		}
	}

	desired_logsize =
	    lfragtosize(fs, fs->fs_size) / UFS_WAPBL_JOURNAL_SCALE;
	DPRINTF("desired log size = %lld kB\n", desired_logsize / 1024);
	desired_logsize = max(desired_logsize, UFS_WAPBL_MIN_JOURNAL_SIZE);
	desired_logsize = min(desired_logsize, UFS_WAPBL_MAX_JOURNAL_SIZE);
	DPRINTF("adjusted desired log size = %lld kB\n",
	    desired_logsize / 1024);

	/* Is there space after the filesystem on the partition for the log? */
	logstart = fsbtodb(fs, fs->fs_size);
	error = wapbl_getdisksize(devvp, &numsecs, &secsize);
	if (error)
		return error;
	KDASSERT(secsize != 0);
	logend = btodb(numsecs * secsize);

	if (dbtob(logend - logstart) >= desired_logsize) {
		DPRINTF("enough space, use end-of-partition log\n");

		location = UFS_WAPBL_JOURNALLOC_END_PARTITION;
		*blksizep = secsize;

		*startp = logstart;
		*countp = (logend - logstart);
		*extradatap = 0;

		/* convert to physical block numbers */
		*startp = dbtob(*startp) / secsize;
		*countp = dbtob(*countp) / secsize;

		fs->fs_journallocs[UFS_WAPBL_EPART_ADDR] = *startp;
		fs->fs_journallocs[UFS_WAPBL_EPART_COUNT] = *countp;
		fs->fs_journallocs[UFS_WAPBL_EPART_BLKSZ] = *blksizep;
		fs->fs_journallocs[UFS_WAPBL_EPART_UNUSED] = *extradatap;
	} else {
		DPRINTF("end-of-partition has only %lld free\n",
		    logend - logstart);

		location = UFS_WAPBL_JOURNALLOC_IN_FILESYSTEM;
		*blksizep = secsize;

		error = wapbl_create_infs_log(mp, fs, devvp,
		                  startp, countp, extradatap);
		ffs_sync(mp, MNT_WAIT, FSCRED, curproc);
		if (error)
			return error;

		/* convert to physical block numbers */
		*startp = dbtob(*startp) / secsize;
		*countp = dbtob(*countp) / secsize;

		fs->fs_journallocs[UFS_WAPBL_INFS_ADDR] = *startp;
		fs->fs_journallocs[UFS_WAPBL_INFS_COUNT] = *countp;
		fs->fs_journallocs[UFS_WAPBL_INFS_BLKSZ] = *blksizep;
		fs->fs_journallocs[UFS_WAPBL_INFS_INO] = *extradatap;
	}

	if (error == 0) {
		/* update superblock with log location */
		fs->fs_journal_version = UFS_WAPBL_VERSION;
		fs->fs_journal_location = location;
		fs->fs_journal_flags = 0;

		error = ffs_sbupdate(ump, MNT_WAIT);
	}

	return error;
}
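The sizing policy described in the comment above reduces to a clamp: one byte of journal per UFS_WAPBL_JOURNAL_SCALE bytes of filesystem, bounded below by the minimum and above by the maximum journal size. Here is a small user-space sketch of that rule, assuming the constants correspond to the 1 MB-per-1 GB, 1 MB minimum and 64 MB maximum figures from the comment; the names below are local to the sketch, not the kernel's.

#include <stdio.h>
#include <stdint.h>

#define JOURNAL_SCALE		1024		   /* fs bytes per journal byte */
#define MIN_JOURNAL_SIZE	(1024 * 1024)	   /* 1 MB */
#define MAX_JOURNAL_SIZE	(64 * 1024 * 1024) /* 64 MB */

static int64_t
desired_log_size(int64_t fsbytes)
{
	int64_t logsize = fsbytes / JOURNAL_SCALE;

	if (logsize < MIN_JOURNAL_SIZE)
		logsize = MIN_JOURNAL_SIZE;
	if (logsize > MAX_JOURNAL_SIZE)
		logsize = MAX_JOURNAL_SIZE;
	return logsize;
}

int
main(void)
{
	int64_t sizes[] = { 512LL << 20, 8LL << 30, 500LL << 30 };
	int i;

	for (i = 0; i < 3; i++)
		printf("%lld MB fs -> %lld kB journal\n",
		    (long long)(sizes[i] >> 20),
		    (long long)(desired_log_size(sizes[i]) / 1024));
	return 0;
}

For a 512 MB filesystem the clamp produces the 1 MB floor, an 8 GB filesystem gets 8 MB, and a 500 GB filesystem hits the 64 MB ceiling.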
Example no. 29
0
/*
 * Balloc defines the structure of filesystem storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 * This is the allocation strategy for UFS1. Below is
 * the allocation strategy for UFS2.
 */
int
ffs_balloc_ufs1(struct vnode *vp, off_t startoffset, int size,
    struct ucred *cred, int flags, struct buf **bpp)
{
	struct inode *ip;
	struct ufs1_dinode *dp;
	ufs_lbn_t lbn, lastlbn;
	struct fs *fs;
	ufs1_daddr_t nb;
	struct buf *bp, *nbp;
	struct ufsmount *ump;
	struct indir indirs[NIADDR + 2];
	int deallocated, osize, nsize, num, i, error;
	ufs2_daddr_t newb;
	ufs1_daddr_t *bap, pref;
	ufs1_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
	ufs2_daddr_t *lbns_remfree, lbns[NIADDR + 1];
	int unwindidx = -1;
	int saved_inbdflush;
	static struct timeval lastfail;
	static int curfail;
	int gbflags, reclaimed;

	ip = VTOI(vp);
	dp = ip->i_din1;
	fs = ITOFS(ip);
	ump = ITOUMP(ip);
	lbn = lblkno(fs, startoffset);
	size = blkoff(fs, startoffset) + size;
	reclaimed = 0;
	if (size > fs->fs_bsize)
		panic("ffs_balloc_ufs1: blk too big");
	*bpp = NULL;
	if (flags & IO_EXT)
		return (EOPNOTSUPP);
	if (lbn < 0)
		return (EFBIG);
	gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;

	if (DOINGSOFTDEP(vp))
		softdep_prealloc(vp, MNT_WAIT);
	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment
	 * this fragment has to be extended to be a full block.
	 */
	lastlbn = lblkno(fs, ip->i_size);
	if (lastlbn < NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			UFS_LOCK(ump);
			error = ffs_realloccg(ip, nb, dp->di_db[nb],
			   ffs_blkpref_ufs1(ip, lastlbn, (int)nb,
			   &dp->di_db[0]), osize, (int)fs->fs_bsize, flags,
			   cred, &bp);
			if (error)
				return (error);
			if (DOINGSOFTDEP(vp))
				softdep_setup_allocdirect(ip, nb,
				    dbtofsb(fs, bp->b_blkno), dp->di_db[nb],
				    fs->fs_bsize, osize, bp);
			ip->i_size = smalllblktosize(fs, nb + 1);
			dp->di_size = ip->i_size;
			dp->di_db[nb] = dbtofsb(fs, bp->b_blkno);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (flags & IO_SYNC)
				bwrite(bp);
			else if (DOINGASYNC(vp))
				bdwrite(bp);
			else
				bawrite(bp);
		}
	}
	/*
	 * The first NDADDR blocks are direct blocks
	 */
	if (lbn < NDADDR) {
		if (flags & BA_METAONLY)
			panic("ffs_balloc_ufs1: BA_METAONLY for direct block");
		nb = dp->di_db[lbn];
		if (nb != 0 && ip->i_size >= smalllblktosize(fs, lbn + 1)) {
			error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
			bp->b_blkno = fsbtodb(fs, nb);
			*bpp = bp;
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {
				error = bread(vp, lbn, osize, NOCRED, &bp);
				if (error) {
					brelse(bp);
					return (error);
				}
				bp->b_blkno = fsbtodb(fs, nb);
			} else {
				UFS_LOCK(ump);
				error = ffs_realloccg(ip, lbn, dp->di_db[lbn],
				    ffs_blkpref_ufs1(ip, lbn, (int)lbn,
				    &dp->di_db[0]), osize, nsize, flags,
				    cred, &bp);
				if (error)
					return (error);
				if (DOINGSOFTDEP(vp))
					softdep_setup_allocdirect(ip, lbn,
					    dbtofsb(fs, bp->b_blkno), nb,
					    nsize, osize, bp);
			}
		} else {
			if (ip->i_size < smalllblktosize(fs, lbn + 1))
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			UFS_LOCK(ump);
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref_ufs1(ip, lbn, (int)lbn, &dp->di_db[0]),
			    nsize, flags, cred, &newb);
			if (error)
				return (error);
			bp = getblk(vp, lbn, nsize, 0, 0, gbflags);
			bp->b_blkno = fsbtodb(fs, newb);
			if (flags & BA_CLRBUF)
				vfs_bio_clrbuf(bp);
			if (DOINGSOFTDEP(vp))
				softdep_setup_allocdirect(ip, lbn, newb, 0,
				    nsize, 0, bp);
		}
		dp->di_db[lbn] = dbtofsb(fs, bp->b_blkno);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bpp = bp;
		return (0);
	}
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if ((error = ufs_getlbns(vp, lbn, indirs, &num)) != 0)
		return(error);
#ifdef INVARIANTS
	if (num < 1)
		panic ("ffs_balloc_ufs1: ufs_getlbns returned indirect block");
#endif
	saved_inbdflush = curthread_pflags_set(TDP_INBDFLUSH);
	/*
	 * Fetch the first indirect block allocating if necessary.
	 */
	--num;
	nb = dp->di_ib[indirs[0].in_off];
	allocib = NULL;
	allocblk = allociblk;
	lbns_remfree = lbns;
	if (nb == 0) {
		UFS_LOCK(ump);
		pref = ffs_blkpref_ufs1(ip, lbn, -indirs[0].in_off - 1,
		    (ufs1_daddr_t *)0);
		if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    flags, cred, &newb)) != 0) {
			curthread_pflags_restore(saved_inbdflush);
			return (error);
		}
		pref = newb + fs->fs_frag;
		nb = newb;
		MPASS(allocblk < allociblk + nitems(allociblk));
		MPASS(lbns_remfree < lbns + nitems(lbns));
		*allocblk++ = nb;
		*lbns_remfree++ = indirs[1].in_lbn;
		bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, gbflags);
		bp->b_blkno = fsbtodb(fs, nb);
		vfs_bio_clrbuf(bp);
		if (DOINGSOFTDEP(vp)) {
			softdep_setup_allocdirect(ip, NDADDR + indirs[0].in_off,
			    newb, 0, fs->fs_bsize, 0, bp);
			bdwrite(bp);
		} else if ((flags & IO_SYNC) == 0 && DOINGASYNC(vp)) {
			if (bp->b_bufsize == fs->fs_bsize)
				bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		} else {
			if ((error = bwrite(bp)) != 0)
				goto fail;
		}
		allocib = &dp->di_ib[indirs[0].in_off];
		*allocib = nb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
retry:
	for (i = 1;;) {
		error = bread(vp,
		    indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			goto fail;
		}
		bap = (ufs1_daddr_t *)bp->b_data;
		nb = bap[indirs[i].in_off];
		if (i == num)
			break;
		i += 1;
		if (nb != 0) {
			bqrelse(bp);
			continue;
		}
		UFS_LOCK(ump);
		/*
		 * If parent indirect has just been allocated, try to cluster
		 * immediately following it.
		 */
		if (pref == 0)
			pref = ffs_blkpref_ufs1(ip, lbn, i - num - 1,
			    (ufs1_daddr_t *)0);
		if ((error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    flags | IO_BUFLOCKED, cred, &newb)) != 0) {
			brelse(bp);
			if (DOINGSOFTDEP(vp) && ++reclaimed == 1) {
				UFS_LOCK(ump);
				softdep_request_cleanup(fs, vp, cred,
				    FLUSH_BLOCKS_WAIT);
				UFS_UNLOCK(ump);
				goto retry;
			}
			if (ppsratecheck(&lastfail, &curfail, 1)) {
				ffs_fserr(fs, ip->i_number, "filesystem full");
				uprintf("\n%s: write failed, filesystem "
				    "is full\n", fs->fs_fsmnt);
			}
			goto fail;
		}
		pref = newb + fs->fs_frag;
		nb = newb;
		MPASS(allocblk < allociblk + nitems(allociblk));
		MPASS(lbns_remfree < lbns + nitems(lbns));
		*allocblk++ = nb;
		*lbns_remfree++ = indirs[i].in_lbn;
		nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, 0);
		nbp->b_blkno = fsbtodb(fs, nb);
		vfs_bio_clrbuf(nbp);
		if (DOINGSOFTDEP(vp)) {
			softdep_setup_allocindir_meta(nbp, ip, bp,
			    indirs[i - 1].in_off, nb);
			bdwrite(nbp);
		} else if ((flags & IO_SYNC) == 0 && DOINGASYNC(vp)) {
			if (nbp->b_bufsize == fs->fs_bsize)
				nbp->b_flags |= B_CLUSTEROK;
			bdwrite(nbp);
		} else {
			if ((error = bwrite(nbp)) != 0) {
				brelse(bp);
				goto fail;
			}
		}
		bap[indirs[i - 1].in_off] = nb;
		if (allocib == NULL && unwindidx < 0)
			unwindidx = i - 1;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & IO_SYNC) {
			bwrite(bp);
		} else {
			if (bp->b_bufsize == fs->fs_bsize)
				bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
	}
	/*
	 * If asked only for the indirect block, then return it.
	 */
	if (flags & BA_METAONLY) {
		curthread_pflags_restore(saved_inbdflush);
		*bpp = bp;
		return (0);
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		UFS_LOCK(ump);
		/*
		 * If allocating metadata at the front of the cylinder
		 * group and parent indirect block has just been allocated,
		 * then cluster next to it if it is the first indirect in
		 * the file. Otherwise it has been allocated in the metadata
		 * area, so we want to find our own place out in the data area.
		 */
		if (pref == 0 || (lbn > NDADDR && fs->fs_metaspace != 0))
			pref = ffs_blkpref_ufs1(ip, lbn, indirs[i].in_off,
			    &bap[0]);
		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    flags | IO_BUFLOCKED, cred, &newb);
		if (error) {
			brelse(bp);
			if (DOINGSOFTDEP(vp) && ++reclaimed == 1) {
				UFS_LOCK(ump);
				softdep_request_cleanup(fs, vp, cred,
				    FLUSH_BLOCKS_WAIT);
				UFS_UNLOCK(ump);
				goto retry;
			}
			if (ppsratecheck(&lastfail, &curfail, 1)) {
				ffs_fserr(fs, ip->i_number, "filesystem full");
				uprintf("\n%s: write failed, filesystem "
				    "is full\n", fs->fs_fsmnt);
			}
			goto fail;
		}
		nb = newb;
		MPASS(allocblk < allociblk + nitems(allociblk));
		MPASS(lbns_remfree < lbns + nitems(lbns));
		*allocblk++ = nb;
		*lbns_remfree++ = lbn;
		nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, gbflags);
		nbp->b_blkno = fsbtodb(fs, nb);
		if (flags & BA_CLRBUF)
			vfs_bio_clrbuf(nbp);
		if (DOINGSOFTDEP(vp))
			softdep_setup_allocindir_page(ip, lbn, bp,
			    indirs[i].in_off, nb, 0, nbp);
		bap[indirs[i].in_off] = nb;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & IO_SYNC) {
			bwrite(bp);
		} else {
			if (bp->b_bufsize == fs->fs_bsize)
				bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		curthread_pflags_restore(saved_inbdflush);
		*bpp = nbp;
		return (0);
	}
	brelse(bp);
	if (flags & BA_CLRBUF) {
		int seqcount = (flags & BA_SEQMASK) >> BA_SEQSHIFT;
		if (seqcount != 0 &&
		    (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0 &&
		    !(vm_page_count_severe() || buf_dirty_count_severe())) {
			error = cluster_read(vp, ip->i_size, lbn,
			    (int)fs->fs_bsize, NOCRED,
			    MAXBSIZE, seqcount, gbflags, &nbp);
		} else {
			error = bread_gb(vp, lbn, (int)fs->fs_bsize, NOCRED,
			    gbflags, &nbp);
		}
		if (error) {
			brelse(nbp);
			goto fail;
		}
	} else {
Example no. 30
0
void
mkfs(struct partition *pp, char *fsys)
{
	int fragsperinode, optimalfpg, origdensity, minfpg, lastminfpg;
	long i, j, csfrags;
	uint cg;
	time_t utime;
	quad_t sizepb;
	int width;
	ino_t maxinum;
	int minfragsperinode;	/* minimum ratio of frags to inodes */
	char tmpbuf[100];	/* XXX this will break in about 2,500 years */
	struct fsrecovery *fsr;
	char *fsrbuf;
	union {
		struct fs fdummy;
		char cdummy[SBLOCKSIZE];
	} dummy;
#define fsdummy dummy.fdummy
#define chdummy dummy.cdummy

	/*
	 * Our blocks == sector size, and the version of UFS we are using is
	 * specified by Oflag.
	 */
	disk.d_bsize = sectorsize;
	disk.d_ufs = Oflag;
	if (Rflag)
		utime = 1000000000;
	else
		time(&utime);
	sblock.fs_old_flags = FS_FLAGS_UPDATED;
	sblock.fs_flags = 0;
	if (Uflag)
		sblock.fs_flags |= FS_DOSOFTDEP;
	if (Lflag)
		strlcpy(sblock.fs_volname, volumelabel, MAXVOLLEN);
	if (Jflag)
		sblock.fs_flags |= FS_GJOURNAL;
	if (lflag)
		sblock.fs_flags |= FS_MULTILABEL;
	if (tflag)
		sblock.fs_flags |= FS_TRIM;
	/*
	 * Validate the given file system size.
	 * Verify that its last block can actually be accessed.
	 * Convert to file system fragment sized units.
	 */
	if (fssize <= 0) {
		printf("preposterous size %jd\n", (intmax_t)fssize);
		exit(13);
	}
	wtfs(fssize - (realsectorsize / DEV_BSIZE), realsectorsize,
	    (char *)&sblock);
	/*
	 * collect and verify the file system density info
	 */
	sblock.fs_avgfilesize = avgfilesize;
	sblock.fs_avgfpdir = avgfilesperdir;
	if (sblock.fs_avgfilesize <= 0)
		printf("illegal expected average file size %d\n",
		    sblock.fs_avgfilesize), exit(14);
	if (sblock.fs_avgfpdir <= 0)
		printf("illegal expected number of files per directory %d\n",
		    sblock.fs_avgfpdir), exit(15);

restart:
	/*
	 * collect and verify the block and fragment sizes
	 */
	sblock.fs_bsize = bsize;
	sblock.fs_fsize = fsize;
	if (!POWEROF2(sblock.fs_bsize)) {
		printf("block size must be a power of 2, not %d\n",
		    sblock.fs_bsize);
		exit(16);
	}
	if (!POWEROF2(sblock.fs_fsize)) {
		printf("fragment size must be a power of 2, not %d\n",
		    sblock.fs_fsize);
		exit(17);
	}
	if (sblock.fs_fsize < sectorsize) {
		printf("increasing fragment size from %d to sector size (%d)\n",
		    sblock.fs_fsize, sectorsize);
		sblock.fs_fsize = sectorsize;
	}
	if (sblock.fs_bsize > MAXBSIZE) {
		printf("decreasing block size from %d to maximum (%d)\n",
		    sblock.fs_bsize, MAXBSIZE);
		sblock.fs_bsize = MAXBSIZE;
	}
	if (sblock.fs_bsize < MINBSIZE) {
		printf("increasing block size from %d to minimum (%d)\n",
		    sblock.fs_bsize, MINBSIZE);
		sblock.fs_bsize = MINBSIZE;
	}
	if (sblock.fs_fsize > MAXBSIZE) {
		printf("decreasing fragment size from %d to maximum (%d)\n",
		    sblock.fs_fsize, MAXBSIZE);
		sblock.fs_fsize = MAXBSIZE;
	}
	if (sblock.fs_bsize < sblock.fs_fsize) {
		printf("increasing block size from %d to fragment size (%d)\n",
		    sblock.fs_bsize, sblock.fs_fsize);
		sblock.fs_bsize = sblock.fs_fsize;
	}
	if (sblock.fs_fsize * MAXFRAG < sblock.fs_bsize) {
		printf(
		"increasing fragment size from %d to block size / %d (%d)\n",
		    sblock.fs_fsize, MAXFRAG, sblock.fs_bsize / MAXFRAG);
		sblock.fs_fsize = sblock.fs_bsize / MAXFRAG;
	}
	if (maxbsize == 0)
		maxbsize = bsize;
	if (maxbsize < bsize || !POWEROF2(maxbsize)) {
		sblock.fs_maxbsize = sblock.fs_bsize;
		printf("Extent size set to %d\n", sblock.fs_maxbsize);
	} else if (sblock.fs_maxbsize > FS_MAXCONTIG * sblock.fs_bsize) {
		sblock.fs_maxbsize = FS_MAXCONTIG * sblock.fs_bsize;
		printf("Extent size reduced to %d\n", sblock.fs_maxbsize);
	} else {
		sblock.fs_maxbsize = maxbsize;
	}
	/*
	 * Maxcontig sets the default for the maximum number of blocks
	 * that may be allocated sequentially. With file system clustering
	 * it is possible to allocate contiguous blocks up to the maximum
	 * transfer size permitted by the controller or buffering.
	 */
	if (maxcontig == 0)
		maxcontig = MAX(1, MAXPHYS / bsize);
	sblock.fs_maxcontig = maxcontig;
	if (sblock.fs_maxcontig < sblock.fs_maxbsize / sblock.fs_bsize) {
		sblock.fs_maxcontig = sblock.fs_maxbsize / sblock.fs_bsize;
		printf("Maxcontig raised to %d\n", sblock.fs_maxbsize);
	}
	if (sblock.fs_maxcontig > 1)
		sblock.fs_contigsumsize = MIN(sblock.fs_maxcontig,FS_MAXCONTIG);
	sblock.fs_bmask = ~(sblock.fs_bsize - 1);
	sblock.fs_fmask = ~(sblock.fs_fsize - 1);
	sblock.fs_qbmask = ~sblock.fs_bmask;
	sblock.fs_qfmask = ~sblock.fs_fmask;
	sblock.fs_bshift = ilog2(sblock.fs_bsize);
	sblock.fs_fshift = ilog2(sblock.fs_fsize);
	sblock.fs_frag = numfrags(&sblock, sblock.fs_bsize);
	sblock.fs_fragshift = ilog2(sblock.fs_frag);
	if (sblock.fs_frag > MAXFRAG) {
		printf("fragment size %d is still too small (can't happen)\n",
		    sblock.fs_bsize / MAXFRAG);
		exit(21);
	}
	sblock.fs_fsbtodb = ilog2(sblock.fs_fsize / sectorsize);
	sblock.fs_size = fssize = dbtofsb(&sblock, fssize);
	sblock.fs_providersize = dbtofsb(&sblock, mediasize / sectorsize);

	/*
	 * Before the filesystem is finally initialized, mark it
	 * as incompletely initialized.
	 */
	sblock.fs_magic = FS_BAD_MAGIC;

	if (Oflag == 1) {
		sblock.fs_sblockloc = SBLOCK_UFS1;
		sblock.fs_sblockactualloc = SBLOCK_UFS1;
		sblock.fs_nindir = sblock.fs_bsize / sizeof(ufs1_daddr_t);
		sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs1_dinode);
		sblock.fs_maxsymlinklen = ((UFS_NDADDR + UFS_NIADDR) *
		    sizeof(ufs1_daddr_t));
		sblock.fs_old_inodefmt = FS_44INODEFMT;
		sblock.fs_old_cgoffset = 0;
		sblock.fs_old_cgmask = 0xffffffff;
		sblock.fs_old_size = sblock.fs_size;
		sblock.fs_old_rotdelay = 0;
		sblock.fs_old_rps = 60;
		sblock.fs_old_nspf = sblock.fs_fsize / sectorsize;
		sblock.fs_old_cpg = 1;
		sblock.fs_old_interleave = 1;
		sblock.fs_old_trackskew = 0;
		sblock.fs_old_cpc = 0;
		sblock.fs_old_postblformat = 1;
		sblock.fs_old_nrpos = 1;
	} else {
		sblock.fs_sblockloc = SBLOCK_UFS2;
		sblock.fs_sblockactualloc = SBLOCK_UFS2;
		sblock.fs_nindir = sblock.fs_bsize / sizeof(ufs2_daddr_t);
		sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs2_dinode);
		sblock.fs_maxsymlinklen = ((UFS_NDADDR + UFS_NIADDR) *
		    sizeof(ufs2_daddr_t));
	}
	sblock.fs_sblkno =
	    roundup(howmany(sblock.fs_sblockloc + SBLOCKSIZE, sblock.fs_fsize),
		sblock.fs_frag);
	sblock.fs_cblkno = sblock.fs_sblkno +
	    roundup(howmany(SBLOCKSIZE, sblock.fs_fsize), sblock.fs_frag);
	sblock.fs_iblkno = sblock.fs_cblkno + sblock.fs_frag;
	sblock.fs_maxfilesize = sblock.fs_bsize * UFS_NDADDR - 1;
	for (sizepb = sblock.fs_bsize, i = 0; i < UFS_NIADDR; i++) {
		sizepb *= NINDIR(&sblock);
		sblock.fs_maxfilesize += sizepb;
	}
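	/*
	 * Worked example (illustrative sizes): with 32 kB blocks and 8-byte
	 * UFS2 block pointers, NINDIR() is 4096, so the loop above adds
	 * 4096, 4096^2 and 4096^3 blocks of 32 kB on top of the 12 direct
	 * blocks when computing fs_maxfilesize.
	 */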

	/*
	 * It is impossible to create a snapshot when fs_maxfilesize
	 * is smaller than fssize.
	 */
	if (sblock.fs_maxfilesize < (u_quad_t)fssize) {
		warnx("WARNING: You will be unable to create snapshots on this "
		      "file system.  Correct by using a larger blocksize.");
	}

	/*
	 * Calculate the number of blocks to put into each cylinder group.
	 *
	 * This algorithm selects the number of blocks per cylinder
	 * group. The first goal is to have at least enough data blocks
	 * in each cylinder group to meet the density requirement. Once
	 * this goal is achieved we try to expand to have at least
	 * MINCYLGRPS cylinder groups. Once this goal is achieved, we
	 * pack as many blocks into each cylinder group map as will fit.
	 *
	 * We start by calculating the smallest number of blocks that we
	 * can put into each cylinder group. If this is too big, we reduce
	 * the density until it fits.
	 */
	maxinum = (((int64_t)(1)) << 32) - INOPB(&sblock);
	minfragsperinode = 1 + fssize / maxinum;
	if (density == 0) {
		density = MAX(NFPI, minfragsperinode) * fsize;
	} else if (density < minfragsperinode * fsize) {
		origdensity = density;
		density = minfragsperinode * fsize;
		fprintf(stderr, "density increased from %d to %d\n",
		    origdensity, density);
	}
	origdensity = density;
	for (;;) {
		fragsperinode = MAX(numfrags(&sblock, density), 1);
		if (fragsperinode < minfragsperinode) {
			bsize <<= 1;
			fsize <<= 1;
			printf("Block size too small for a file system %s %d\n",
			     "of this size. Increasing blocksize to", bsize);
			goto restart;
		}
		minfpg = fragsperinode * INOPB(&sblock);
		if (minfpg > sblock.fs_size)
			minfpg = sblock.fs_size;
		sblock.fs_ipg = INOPB(&sblock);
		sblock.fs_fpg = roundup(sblock.fs_iblkno +
		    sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag);
		if (sblock.fs_fpg < minfpg)
			sblock.fs_fpg = minfpg;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
		sblock.fs_fpg = roundup(sblock.fs_iblkno +
		    sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag);
		if (sblock.fs_fpg < minfpg)
			sblock.fs_fpg = minfpg;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
		if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize)
			break;
		density -= sblock.fs_fsize;
	}
	if (density != origdensity)
		printf("density reduced from %d to %d\n", origdensity, density);
	/*
	 * Start packing more blocks into the cylinder group until
	 * it cannot grow any larger, the number of cylinder groups
	 * drops below MINCYLGRPS, or we reach the size requested.
	 * For UFS1 inodes per cylinder group are stored in an int16_t
	 * so fs_ipg is limited to 2^15 - 1.
	 */
	for ( ; sblock.fs_fpg < maxblkspercg; sblock.fs_fpg += sblock.fs_frag) {
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
		if (Oflag > 1 || (Oflag == 1 && sblock.fs_ipg <= 0x7fff)) {
			if (sblock.fs_size / sblock.fs_fpg < MINCYLGRPS)
				break;
			if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize)
				continue;
			if (CGSIZE(&sblock) == (unsigned long)sblock.fs_bsize)
				break;
		}
		sblock.fs_fpg -= sblock.fs_frag;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
		break;
	}
	/*
	 * Check to be sure that the last cylinder group has enough blocks
	 * to be viable. If it is too small, reduce the number of blocks
	 * per cylinder group which will have the effect of moving more
	 * blocks into the last cylinder group.
	 */
	optimalfpg = sblock.fs_fpg;
	for (;;) {
		sblock.fs_ncg = howmany(sblock.fs_size, sblock.fs_fpg);
		lastminfpg = roundup(sblock.fs_iblkno +
		    sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag);
		if (sblock.fs_size < lastminfpg) {
			printf("Filesystem size %jd < minimum size of %d\n",
			    (intmax_t)sblock.fs_size, lastminfpg);
			exit(28);
		}
		if (sblock.fs_size % sblock.fs_fpg >= lastminfpg ||
		    sblock.fs_size % sblock.fs_fpg == 0)
			break;
		sblock.fs_fpg -= sblock.fs_frag;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
	}
	if (optimalfpg != sblock.fs_fpg)
		printf("Reduced frags per cylinder group from %d to %d %s\n",
		   optimalfpg, sblock.fs_fpg, "to enlarge last cyl group");
	sblock.fs_cgsize = fragroundup(&sblock, CGSIZE(&sblock));
	sblock.fs_dblkno = sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock);
	if (Oflag == 1) {
		sblock.fs_old_spc = sblock.fs_fpg * sblock.fs_old_nspf;
		sblock.fs_old_nsect = sblock.fs_old_spc;
		sblock.fs_old_npsect = sblock.fs_old_spc;
		sblock.fs_old_ncyl = sblock.fs_ncg;
	}
	/*
	 * fill in remaining fields of the super block
	 */
	sblock.fs_csaddr = cgdmin(&sblock, 0);
	sblock.fs_cssize =
	    fragroundup(&sblock, sblock.fs_ncg * sizeof(struct csum));
	fscs = (struct csum *)calloc(1, sblock.fs_cssize);
	if (fscs == NULL)
		errx(31, "calloc failed");
	sblock.fs_sbsize = fragroundup(&sblock, sizeof(struct fs));
	if (sblock.fs_sbsize > SBLOCKSIZE)
		sblock.fs_sbsize = SBLOCKSIZE;
	if (sblock.fs_sbsize < realsectorsize)
		sblock.fs_sbsize = realsectorsize;
	sblock.fs_minfree = minfree;
	if (metaspace > 0 && metaspace < sblock.fs_fpg / 2)
		sblock.fs_metaspace = blknum(&sblock, metaspace);
	else if (metaspace != -1)
		/* reserve half of minfree for metadata blocks */
		sblock.fs_metaspace = blknum(&sblock,
		    (sblock.fs_fpg * minfree) / 200);
	if (maxbpg == 0)
		sblock.fs_maxbpg = MAXBLKPG(sblock.fs_bsize);
	else
		sblock.fs_maxbpg = maxbpg;
	sblock.fs_optim = opt;
	sblock.fs_cgrotor = 0;
	sblock.fs_pendingblocks = 0;
	sblock.fs_pendinginodes = 0;
	sblock.fs_fmod = 0;
	sblock.fs_ronly = 0;
	sblock.fs_state = 0;
	sblock.fs_clean = 1;
	sblock.fs_id[0] = (long)utime;
	sblock.fs_id[1] = newfs_random();
	sblock.fs_fsmnt[0] = '\0';
	csfrags = howmany(sblock.fs_cssize, sblock.fs_fsize);
	sblock.fs_dsize = sblock.fs_size - sblock.fs_sblkno -
	    sblock.fs_ncg * (sblock.fs_dblkno - sblock.fs_sblkno);
	sblock.fs_cstotal.cs_nbfree =
	    fragstoblks(&sblock, sblock.fs_dsize) -
	    howmany(csfrags, sblock.fs_frag);
	sblock.fs_cstotal.cs_nffree =
	    fragnum(&sblock, sblock.fs_size) +
	    (fragnum(&sblock, csfrags) > 0 ?
	     sblock.fs_frag - fragnum(&sblock, csfrags) : 0);
	sblock.fs_cstotal.cs_nifree =
	    sblock.fs_ncg * sblock.fs_ipg - UFS_ROOTINO;
	sblock.fs_cstotal.cs_ndir = 0;
	sblock.fs_dsize -= csfrags;
	sblock.fs_time = utime;
	if (Oflag == 1) {
		sblock.fs_old_time = utime;
		sblock.fs_old_dsize = sblock.fs_dsize;
		sblock.fs_old_csaddr = sblock.fs_csaddr;
		sblock.fs_old_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir;
		sblock.fs_old_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree;
		sblock.fs_old_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree;
		sblock.fs_old_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree;
	}
	/*
	 * Set flags for metadata that is being check-hashed.
	 *
	 * Metadata check hashes are not supported in the UFS version 1
	 * filesystem to keep it as small and simple as possible.
	 */
	if (Oflag > 1) {
		sblock.fs_flags |= FS_METACKHASH;
		if (getosreldate() >= P_OSREL_CK_CYLGRP)
			sblock.fs_metackhash |= CK_CYLGRP;
		if (getosreldate() >= P_OSREL_CK_SUPERBLOCK)
			sblock.fs_metackhash |= CK_SUPERBLOCK;
		if (getosreldate() >= P_OSREL_CK_INODE)
			sblock.fs_metackhash |= CK_INODE;
	}

	/*
	 * Dump out summary information about file system.
	 */
#	define B2MBFACTOR (1 / (1024.0 * 1024.0))
	printf("%s: %.1fMB (%jd sectors) block size %d, fragment size %d\n",
	    fsys, (float)sblock.fs_size * sblock.fs_fsize * B2MBFACTOR,
	    (intmax_t)fsbtodb(&sblock, sblock.fs_size), sblock.fs_bsize,
	    sblock.fs_fsize);
	printf("\tusing %d cylinder groups of %.2fMB, %d blks, %d inodes.\n",
	    sblock.fs_ncg, (float)sblock.fs_fpg * sblock.fs_fsize * B2MBFACTOR,
	    sblock.fs_fpg / sblock.fs_frag, sblock.fs_ipg);
	if (sblock.fs_flags & FS_DOSOFTDEP)
		printf("\twith soft updates\n");
#	undef B2MBFACTOR

	if (Eflag && !Nflag) {
		printf("Erasing sectors [%jd...%jd]\n", 
		    sblock.fs_sblockloc / disk.d_bsize,
		    fsbtodb(&sblock, sblock.fs_size) - 1);
		berase(&disk, sblock.fs_sblockloc / disk.d_bsize,
		    sblock.fs_size * sblock.fs_fsize - sblock.fs_sblockloc);
	}
	/*
	 * Wipe out old UFS1 superblock(s) if necessary.
	 */
	if (!Nflag && Oflag != 1 && realsectorsize <= SBLOCK_UFS1) {
		i = bread(&disk, part_ofs + SBLOCK_UFS1 / disk.d_bsize, chdummy,
		    SBLOCKSIZE);
		if (i == -1)
			err(1, "can't read old UFS1 superblock: %s",
			    disk.d_error);

		if (fsdummy.fs_magic == FS_UFS1_MAGIC) {
			fsdummy.fs_magic = 0;
			bwrite(&disk, part_ofs + SBLOCK_UFS1 / disk.d_bsize,
			    chdummy, SBLOCKSIZE);
			for (cg = 0; cg < fsdummy.fs_ncg; cg++) {
				if (fsbtodb(&fsdummy, cgsblock(&fsdummy, cg)) >
				    fssize)
					break;
				bwrite(&disk, part_ofs + fsbtodb(&fsdummy,
				  cgsblock(&fsdummy, cg)), chdummy, SBLOCKSIZE);
			}
		}
	}
	if (!Nflag && sbput(disk.d_fd, &disk.d_fs, 0) != 0)
		err(1, "sbput: %s", disk.d_error);
	if (Xflag == 1) {
		printf("** Exiting on Xflag 1\n");
		exit(0);
	}
	if (Xflag == 2)
		printf("** Leaving BAD MAGIC on Xflag 2\n");
	else
		sblock.fs_magic = (Oflag != 1) ? FS_UFS2_MAGIC : FS_UFS1_MAGIC;

	/*
	 * Now build the cylinders group blocks and
	 * then print out indices of cylinder groups.
	 */
	printf("super-block backups (for fsck_ffs -b #) at:\n");
	i = 0;
	width = charsperline();
	/*
	 * Allocate space for two sets of inode blocks.
	 */
	iobufsize = 2 * sblock.fs_bsize;
	if ((iobuf = calloc(1, iobufsize)) == 0) {
		printf("Cannot allocate I/O buffer\n");
		exit(38);
	}
	/*
	 * Write out all the cylinder groups and backup superblocks.
	 */
	for (cg = 0; cg < sblock.fs_ncg; cg++) {
		if (!Nflag)
			initcg(cg, utime);
		j = snprintf(tmpbuf, sizeof(tmpbuf), " %jd%s",
		    (intmax_t)fsbtodb(&sblock, cgsblock(&sblock, cg)),
		    cg < (sblock.fs_ncg-1) ? "," : "");
		if (j < 0)
			tmpbuf[j = 0] = '\0';
		if (i + j >= width) {
			printf("\n");
			i = 0;
		}
		i += j;
		printf("%s", tmpbuf);
		fflush(stdout);
	}
	printf("\n");
	if (Nflag)
		exit(0);
	/*
	 * Now construct the initial file system,
	 * then write out the super-block.
	 */
	fsinit(utime);
	if (Oflag == 1) {
		sblock.fs_old_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir;
		sblock.fs_old_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree;
		sblock.fs_old_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree;
		sblock.fs_old_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree;
	}
	if (Xflag == 3) {
		printf("** Exiting on Xflag 3\n");
		exit(0);
	}
	/*
	 * Reference the summary information so it will also be written.
	 */
	sblock.fs_csp = fscs;
	if (sbput(disk.d_fd, &disk.d_fs, 0) != 0)
		err(1, "sbput: %s", disk.d_error);
	/*
	 * For UFS1 filesystems with a blocksize of 64K, the first
	 * alternate superblock resides at the location used for
	 * the default UFS2 superblock. As there is a valid
	 * superblock at this location, the boot code will use
	 * it as its first choice. Thus we have to ensure that
	 * all of its statistics on usage are correct.
	 */
	if (Oflag == 1 && sblock.fs_bsize == 65536)
		wtfs(fsbtodb(&sblock, cgsblock(&sblock, 0)),
		    sblock.fs_bsize, (char *)&sblock);
	/*
	 * Read the last sector of the boot block, replace the last
	 * 20 bytes with the recovery information, then write it back.
	 * The recovery information only works for UFS2 filesystems.
	 */
	if (sblock.fs_magic == FS_UFS2_MAGIC) {
		if ((fsrbuf = malloc(realsectorsize)) == NULL || bread(&disk,
		    part_ofs + (SBLOCK_UFS2 - realsectorsize) / disk.d_bsize,
		    fsrbuf, realsectorsize) == -1)
			err(1, "can't read recovery area: %s", disk.d_error);
		fsr =
		    (struct fsrecovery *)&fsrbuf[realsectorsize - sizeof *fsr];
		fsr->fsr_magic = sblock.fs_magic;
		fsr->fsr_fpg = sblock.fs_fpg;
		fsr->fsr_fsbtodb = sblock.fs_fsbtodb;
		fsr->fsr_sblkno = sblock.fs_sblkno;
		fsr->fsr_ncg = sblock.fs_ncg;
		wtfs((SBLOCK_UFS2 - realsectorsize) / disk.d_bsize,
		    realsectorsize, fsrbuf);
		free(fsrbuf);
	}
	/*
	 * Update information about this partition in pack
	 * label, so that it may be updated on disk.
	 */
	if (pp != NULL) {
		pp->p_fstype = FS_BSDFFS;
		pp->p_fsize = sblock.fs_fsize;
		pp->p_frag = sblock.fs_frag;
		pp->p_cpg = sblock.fs_fpg;
	}
}
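The cylinder-group sizing loop in mkfs() above works fragment-for-inode: the requested density (bytes of file data per inode) is converted to whole fragments, and each group then gets one inode per that many fragments, rounded up to a full inode block. The standalone sketch below shows that arithmetic with illustrative values (4 kB fragments, 16 kB of data per inode, and 128 inodes per block as for UFS2 with 32 kB blocks); howmany() and roundup() are spelled out locally so the sketch compiles on its own.

#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))
#define roundup(x, y)	(howmany(x, y) * (y))

int
main(void)
{
	int fsize = 4096;		/* fragment size */
	int density = 16384;		/* bytes of data per inode */
	int inopb = 128;		/* inodes per block (UFS2, 32 kB blocks) */
	int fpg = 65528;		/* fragments in this cylinder group */
	int fragsperinode, ipg;

	fragsperinode = density / fsize;		/* 4 frags per inode */
	ipg = roundup(howmany(fpg, fragsperinode), inopb);
	printf("%d frags at %d bytes/inode -> %d inodes per group\n",
	    fpg, density, ipg);
	return 0;
}

For these figures a 65528-fragment group gets 16384 inodes, the effect of rounding up to a whole inode block.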