static int calc_num_blocks(struct inode *inode) { int l1_indir, l2_indir, l3_indir; struct fs *fs = inode->i_fs; int nfrags, numblocks = howmany(inode->i_size, fs->fs_bsize); if (numblocks < NDADDR) { nfrags = numfrags(fs, fragroundup(fs, inode->i_size)); } else { int nindirs = fs->fs_nindir; /* Calculate how many indirect blocks we need to hold this many blocks */ l1_indir = howmany(numblocks - NDADDR, nindirs); numblocks += l1_indir; l2_indir = howmany(l1_indir - 1, nindirs); numblocks += l2_indir; if (l2_indir) { l3_indir = howmany(l2_indir - 1, nindirs); numblocks += l3_indir; } nfrags = numblocks * fs->fs_frag; } return nfrags * (fs->fs_fsize >> DEV_BSHIFT); }
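/*
 * A minimal standalone sketch of the indirect-block overhead arithmetic used
 * above, assuming hypothetical UFS1-style parameters (8 KB blocks, 12 direct
 * pointers, 2048 pointers per indirect block). It only mirrors the
 * howmany()-style counting; it is not the utility's actual code.
 */
#include <stdint.h>
#include <stdio.h>

#define HOWMANY(x, y)	(((x) + (y) - 1) / (y))

int
main(void)
{
	const int64_t bsize = 8192, ndaddr = 12, nindir = 2048;
	int64_t size = 50 * 1024 * 1024;	/* 50 MB file, for example */
	int64_t numblocks = HOWMANY(size, bsize);

	if (numblocks > ndaddr) {
		int64_t l1 = HOWMANY(numblocks - ndaddr, nindir);
		int64_t l2 = HOWMANY(l1 - 1, nindir);
		int64_t l3 = l2 ? HOWMANY(l2 - 1, nindir) : 0;

		printf("data %lld + indirect %lld/%lld/%lld blocks\n",
		    (long long)numblocks, (long long)l1, (long long)l2,
		    (long long)l3);
	}
	return (0);
}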
int cgbfree(struct uufsd *disk, ufs2_daddr_t bno, long size) { u_int8_t *blksfree; struct fs *fs; struct cg *cgp; ufs1_daddr_t fragno, cgbno; int i, cg, blk, frags, bbase; fs = &disk->d_fs; cg = dtog(fs, bno); if (cgread1(disk, cg) != 1) return (-1); cgp = &disk->d_cg; cgbno = dtogd(fs, bno); blksfree = cg_blksfree(cgp); if (size == fs->fs_bsize) { fragno = fragstoblks(fs, cgbno); ffs_setblock(fs, blksfree, fragno); ffs_clusteracct(fs, cgp, fragno, 1); cgp->cg_cs.cs_nbfree++; fs->fs_cstotal.cs_nbfree++; fs->fs_cs(fs, cg).cs_nbfree++; } else { bbase = cgbno - fragnum(fs, cgbno); /* * decrement the counts associated with the old frags */ blk = blkmap(fs, blksfree, bbase); ffs_fragacct(fs, blk, cgp->cg_frsum, -1); /* * deallocate the fragment */ frags = numfrags(fs, size); for (i = 0; i < frags; i++) setbit(blksfree, cgbno + i); cgp->cg_cs.cs_nffree += i; fs->fs_cstotal.cs_nffree += i; fs->fs_cs(fs, cg).cs_nffree += i; /* * add back in counts associated with the new frags */ blk = blkmap(fs, blksfree, bbase); ffs_fragacct(fs, blk, cgp->cg_frsum, 1); /* * if a complete block has been reassembled, account for it */ fragno = fragstoblks(fs, bbase); if (ffs_isblock(fs, blksfree, fragno)) { cgp->cg_cs.cs_nffree -= fs->fs_frag; fs->fs_cstotal.cs_nffree -= fs->fs_frag; fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag; ffs_clusteracct(fs, cgp, fragno, 1); cgp->cg_cs.cs_nbfree++; fs->fs_cstotal.cs_nbfree++; fs->fs_cs(fs, cg).cs_nbfree++; } } return cgwrite(disk); }
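/*
 * A simplified, self-contained sketch of the free-fragment bitmap handling
 * that cgbfree() relies on: MYSETBIT()/MYISSET() mirror the setbit()/isset()
 * macros from <sys/param.h>, and a plain byte array stands in for
 * cg_blksfree(cgp). The final loop mirrors the idea behind ffs_isblock(): a
 * block has been reassembled when all of its fragments are marked free. The
 * values (8 fragments per block, a block-aligned cgbno) are illustrative only.
 */
#include <stdio.h>
#include <string.h>

#define NBBY		8
#define MYSETBIT(a, i)	((a)[(i) / NBBY] |= 1 << ((i) % NBBY))
#define MYISSET(a, i)	((a)[(i) / NBBY] & (1 << ((i) % NBBY)))

int
main(void)
{
	unsigned char blksfree[16];
	int frag = 8, cgbno = 24, frags = 3, i, whole;

	memset(blksfree, 0, sizeof(blksfree));
	/* free "frags" fragments starting at cgbno, as the loop above does */
	for (i = 0; i < frags; i++)
		MYSETBIT(blksfree, cgbno + i);
	/* has the whole block starting at cgbno become free? */
	for (whole = 1, i = 0; i < frag; i++)
		if (!MYISSET(blksfree, cgbno + i))
			whole = 0;
	printf("whole block free: %s\n", whole ? "yes" : "no");
	return (0);
}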
/* * Print the block pointers for one inode. */ static void printblocks(ino_t inum, struct ufs1_dinode *dp) { char *bufp; int i, nfrags; long ndb, offset; printf("Blocks for inode %d:\n", inum); printf("Direct blocks:\n"); ndb = howmany(dp->di_size, sblock.fs_bsize); for (i = 0; i < NDADDR; i++) { if (dp->di_db[i] == 0) { putchar('\n'); return; } if (i > 0) printf(", "); printf("%d", dp->di_db[i]); if (--ndb == 0 && (offset = blkoff(&sblock, dp->di_size)) != 0) { nfrags = numfrags(&sblock, fragroundup(&sblock, offset)); printf(" (%d frag%s)", nfrags, nfrags > 1? "s": ""); } } putchar('\n'); if (dp->di_ib[0] == 0) return; bufp = malloc((unsigned int)sblock.fs_bsize); if (bufp == NULL) errx(EEXIT, "cannot allocate indirect block buffer"); printf("Indirect blocks:\n"); for (i = 0; i < NIADDR; i++) if (printindir(dp->di_ib[i], i, bufp) == 0) break; free(bufp); }
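/*
 * The "(N frags)" annotation printed above boils down to fragment-rounding
 * arithmetic on the byte offset within the last data block. A standalone
 * sketch, assuming hypothetical sizes (fs_bsize 16384, fs_fsize 2048);
 * blkoff(), fragroundup() and numfrags() are reduced to the plain
 * power-of-two math they expand to.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const int64_t bsize = 16384, fsize = 2048;
	int64_t di_size = 100000;		/* example file length */
	int64_t offset = di_size % bsize;	/* blkoff() */
	int64_t nfrags = 0;

	if (offset != 0)			/* partial last block */
		nfrags = (offset + fsize - 1) / fsize;	/* numfrags(fragroundup()) */
	printf("last block uses %lld frag%s\n", (long long)nfrags,
	    nfrags == 1 ? "" : "s");
	return (0);
}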
/* * Print the block pointers for one inode. */ static void printblocks(ino_t inum, union dinode *dp) { char *bufp; int i, nfrags; long ndb, offset; ufs2_daddr_t blkno; printf("Blocks for inode %d:\n", inum); printf("Direct blocks:\n"); ndb = howmany(DIP(dp, di_size), sblock.fs_bsize); for (i = 0; i < NDADDR && i < ndb; i++) { if (i > 0) printf(", "); blkno = DIP(dp, di_db[i]); printf("%jd", (intmax_t)blkno); } if (ndb <= NDADDR) { offset = blkoff(&sblock, DIP(dp, di_size)); if (offset != 0) { nfrags = numfrags(&sblock, fragroundup(&sblock, offset)); printf(" (%d frag%s)", nfrags, nfrags > 1? "s": ""); } } putchar('\n'); if (ndb <= NDADDR) return; bufp = malloc((unsigned int)sblock.fs_bsize); if (bufp == 0) errx(EEXIT, "cannot allocate indirect block buffer"); printf("Indirect blocks:\n"); for (i = 0; i < NIADDR; i++) printindir(DIP(dp, di_ib[i]), i, bufp); free(bufp); }
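/*
 * A much-simplified illustration of the union dinode / DIP() idea that lets
 * the UFS2-aware printblocks() above serve both inode formats with one body.
 * The structures here are trimmed stand-ins with only a size field, not the
 * real ufs1_dinode/ufs2_dinode layouts, and the selector is a plain flag
 * rather than a check of the superblock magic.
 */
#include <stdint.h>
#include <stdio.h>

struct small_dinode1 { uint64_t di_size; };
struct small_dinode2 { uint64_t di_size; };
union small_dinode {
	struct small_dinode1 dp1;
	struct small_dinode2 dp2;
};

static int is_ufs2;	/* in fsdb this choice comes from the superblock */

#define SDIP(dp, field) (is_ufs2 ? (dp)->dp2.field : (dp)->dp1.field)

int
main(void)
{
	union small_dinode d;

	is_ufs2 = 1;
	d.dp2.di_size = 123456;
	printf("size via accessor: %ju\n", (uintmax_t)SDIP(&d, di_size));
	return (0);
}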
/* * Truncate the inode oip to at most length size, freeing the * disk blocks. */ int ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred, struct thread *td) { struct vnode *ovp = vp; int32_t lastblock; struct inode *oip; int32_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR]; uint32_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR]; struct m_ext2fs *fs; struct buf *bp; int offset, size, level; e4fs_daddr_t count, nblocks, blocksreleased = 0; int error, i, allerror; off_t osize; #ifdef INVARIANTS struct bufobj *bo; #endif oip = VTOI(ovp); #ifdef INVARIANTS bo = &ovp->v_bufobj; #endif ASSERT_VOP_LOCKED(vp, "ext2_truncate"); if (length < 0) return (EINVAL); if (ovp->v_type == VLNK && oip->i_size < ovp->v_mount->mnt_maxsymlinklen) { #ifdef INVARIANTS if (length != 0) panic("ext2_truncate: partial truncate of symlink"); #endif bzero((char *)&oip->i_shortlink, (u_int)oip->i_size); oip->i_size = 0; oip->i_flag |= IN_CHANGE | IN_UPDATE; return (ext2_update(ovp, 1)); } if (oip->i_size == length) { oip->i_flag |= IN_CHANGE | IN_UPDATE; return (ext2_update(ovp, 0)); } fs = oip->i_e2fs; osize = oip->i_size; /* * Lengthen the size of the file. We must ensure that the * last byte of the file is allocated. Since the smallest * value of osize is 0, length will be at least 1. */ if (osize < length) { if (length > oip->i_e2fs->e2fs_maxfilesize) return (EFBIG); vnode_pager_setsize(ovp, length); offset = blkoff(fs, length - 1); lbn = lblkno(fs, length - 1); flags |= BA_CLRBUF; error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags); if (error) { vnode_pager_setsize(vp, osize); return (error); } oip->i_size = length; if (bp->b_bufsize == fs->e2fs_bsize) bp->b_flags |= B_CLUSTEROK; if (flags & IO_SYNC) bwrite(bp); else if (DOINGASYNC(ovp)) bdwrite(bp); else bawrite(bp); oip->i_flag |= IN_CHANGE | IN_UPDATE; return (ext2_update(ovp, !DOINGASYNC(ovp))); } /* * Shorten the size of the file. If the file is not being * truncated to a block boundary, the contents of the * partial block following the end of the file must be * zero'ed in case it ever becomes accessible again because * of subsequent file growth. */ /* I don't understand the comment above */ offset = blkoff(fs, length); if (offset == 0) { oip->i_size = length; } else { lbn = lblkno(fs, length); flags |= BA_CLRBUF; error = ext2_balloc(oip, lbn, offset, cred, &bp, flags); if (error) return (error); oip->i_size = length; size = blksize(fs, oip, lbn); bzero((char *)bp->b_data + offset, (u_int)(size - offset)); allocbuf(bp, size); if (bp->b_bufsize == fs->e2fs_bsize) bp->b_flags |= B_CLUSTEROK; if (flags & IO_SYNC) bwrite(bp); else if (DOINGASYNC(ovp)) bdwrite(bp); else bawrite(bp); } /* * Calculate index into inode's block list of * last direct and indirect blocks (if any) * which we want to keep. Lastblock is -1 when * the file is truncated to 0. */ lastblock = lblkno(fs, length + fs->e2fs_bsize - 1) - 1; lastiblock[SINGLE] = lastblock - NDADDR; lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs); lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs); nblocks = btodb(fs->e2fs_bsize); /* * Update file and block pointers on disk before we start freeing * blocks. If we crash before free'ing blocks below, the blocks * will be returned to the free list. lastiblock values are also * normalized to -1 for calls to ext2_indirtrunc below. 
*/ for (level = TRIPLE; level >= SINGLE; level--) { oldblks[NDADDR + level] = oip->i_ib[level]; if (lastiblock[level] < 0) { oip->i_ib[level] = 0; lastiblock[level] = -1; } } for (i = 0; i < NDADDR; i++) { oldblks[i] = oip->i_db[i]; if (i > lastblock) oip->i_db[i] = 0; } oip->i_flag |= IN_CHANGE | IN_UPDATE; allerror = ext2_update(ovp, !DOINGASYNC(ovp)); /* * Having written the new inode to disk, save its new configuration * and put back the old block pointers long enough to process them. * Note that we save the new block configuration so we can check it * when we are done. */ for (i = 0; i < NDADDR; i++) { newblks[i] = oip->i_db[i]; oip->i_db[i] = oldblks[i]; } for (i = 0; i < NIADDR; i++) { newblks[NDADDR + i] = oip->i_ib[i]; oip->i_ib[i] = oldblks[NDADDR + i]; } oip->i_size = osize; error = vtruncbuf(ovp, cred, length, (int)fs->e2fs_bsize); if (error && (allerror == 0)) allerror = error; vnode_pager_setsize(ovp, length); /* * Indirect blocks first. */ indir_lbn[SINGLE] = -NDADDR; indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1; indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1; for (level = TRIPLE; level >= SINGLE; level--) { bn = oip->i_ib[level]; if (bn != 0) { error = ext2_indirtrunc(oip, indir_lbn[level], fsbtodb(fs, bn), lastiblock[level], level, &count); if (error) allerror = error; blocksreleased += count; if (lastiblock[level] < 0) { oip->i_ib[level] = 0; ext2_blkfree(oip, bn, fs->e2fs_fsize); blocksreleased += nblocks; } } if (lastiblock[level] >= 0) goto done; } /* * All whole direct blocks or frags. */ for (i = NDADDR - 1; i > lastblock; i--) { long bsize; bn = oip->i_db[i]; if (bn == 0) continue; oip->i_db[i] = 0; bsize = blksize(fs, oip, i); ext2_blkfree(oip, bn, bsize); blocksreleased += btodb(bsize); } if (lastblock < 0) goto done; /* * Finally, look for a change in size of the * last direct block; release any frags. */ bn = oip->i_db[lastblock]; if (bn != 0) { long oldspace, newspace; /* * Calculate amount of space we're giving * back as old block size minus new block size. */ oldspace = blksize(fs, oip, lastblock); oip->i_size = length; newspace = blksize(fs, oip, lastblock); if (newspace == 0) panic("ext2_truncate: newspace"); if (oldspace - newspace > 0) { /* * Block number of space to be free'd is * the old block # plus the number of frags * required for the storage we're keeping. */ bn += numfrags(fs, newspace); ext2_blkfree(oip, bn, oldspace - newspace); blocksreleased += btodb(oldspace - newspace); } } done: #ifdef INVARIANTS for (level = SINGLE; level <= TRIPLE; level++) if (newblks[NDADDR + level] != oip->i_ib[level]) panic("itrunc1"); for (i = 0; i < NDADDR; i++) if (newblks[i] != oip->i_db[i]) panic("itrunc2"); BO_LOCK(bo); if (length == 0 && (bo->bo_dirty.bv_cnt != 0 || bo->bo_clean.bv_cnt != 0)) panic("itrunc3"); BO_UNLOCK(bo); #endif /* INVARIANTS */ /* * Put back the real size. */ oip->i_size = length; if (oip->i_blocks >= blocksreleased) oip->i_blocks -= blocksreleased; else /* sanity */ oip->i_blocks = 0; oip->i_flag |= IN_CHANGE; vnode_pager_setsize(ovp, length); return (allerror); }
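/*
 * A standalone sketch of the lastblock/lastiblock[] index arithmetic shared
 * by the truncate routines collected here, assuming hypothetical
 * ext2/FFS-style parameters (4 KB blocks, NDADDR 12, NINDIR 1024). lblkno()
 * is reduced to a plain division; the point is only to show how a new length
 * maps to the last direct index and to per-level indirect indices, where a
 * negative value (later normalized to -1) means "keep nothing at this level".
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const int64_t bsize = 4096, ndaddr = 12, nindir = 1024;
	int64_t length = 1 * 1024 * 1024;	/* truncate to 1 MB, say */
	int64_t lastblock = (length + bsize - 1) / bsize - 1;
	int64_t lastib_single = lastblock - ndaddr;
	int64_t lastib_double = lastib_single - nindir;
	int64_t lastib_triple = lastib_double - nindir * nindir;

	printf("lastblock %lld, single %lld, double %lld, triple %lld\n",
	    (long long)lastblock, (long long)lastib_single,
	    (long long)lastib_double, (long long)lastib_triple);
	return (0);
}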
void mkfs(struct partition *pp, char *fsys, int fi, int fo, mode_t mfsmode, uid_t mfsuid, gid_t mfsgid) { time_t utime; quad_t sizepb; int i, j, width, origdensity, fragsperinode, minfpg, optimalfpg; int lastminfpg, mincylgrps; long cylno, csfrags; char tmpbuf[100]; /* XXX this will break in about 2,500 years */ if ((fsun = calloc(1, sizeof (union fs_u))) == NULL || (cgun = calloc(1, sizeof (union cg_u))) == NULL) err(1, "calloc"); #ifndef STANDALONE time(&utime); #endif if (mfs) { quad_t sz = (quad_t)fssize * sectorsize; if (sz > SIZE_T_MAX) { errno = ENOMEM; err(12, "mmap"); } membase = mmap(NULL, sz, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, (off_t)0); if (membase == MAP_FAILED) err(12, "mmap"); madvise(membase, sz, MADV_RANDOM); } fsi = fi; fso = fo; /* * Validate the given file system size. * Verify that its last block can actually be accessed. */ if (Oflag <= 1 && fssize > INT_MAX) errx(13, "preposterous size %lld, max is %d", fssize, INT_MAX); if (Oflag == 2 && fssize > MAXDISKSIZE) errx(13, "preposterous size %lld, max is %lld", fssize, MAXDISKSIZE); wtfs(fssize - 1, sectorsize, (char *)&sblock); sblock.fs_postblformat = FS_DYNAMICPOSTBLFMT; sblock.fs_avgfilesize = avgfilesize; sblock.fs_avgfpdir = avgfilesperdir; /* * Collect and verify the block and fragment sizes. */ if (!POWEROF2(bsize)) { errx(16, "block size must be a power of 2, not %d", bsize); } if (!POWEROF2(fsize)) { errx(17, "fragment size must be a power of 2, not %d", fsize); } if (fsize < sectorsize) { errx(18, "fragment size %d is too small, minimum is %d", fsize, sectorsize); } if (bsize < MINBSIZE) { errx(19, "block size %d is too small, minimum is %d", bsize, MINBSIZE); } if (bsize > MAXBSIZE) { errx(19, "block size %d is too large, maximum is %d", bsize, MAXBSIZE); } if (bsize < fsize) { errx(20, "block size (%d) cannot be smaller than fragment size (%d)", bsize, fsize); } sblock.fs_bsize = bsize; sblock.fs_fsize = fsize; /* * Calculate the superblock bitmasks and shifts. */ sblock.fs_bmask = ~(sblock.fs_bsize - 1); sblock.fs_fmask = ~(sblock.fs_fsize - 1); sblock.fs_qbmask = ~sblock.fs_bmask; sblock.fs_qfmask = ~sblock.fs_fmask; sblock.fs_bshift = ilog2(sblock.fs_bsize); sblock.fs_fshift = ilog2(sblock.fs_fsize); sblock.fs_frag = numfrags(&sblock, sblock.fs_bsize); if (sblock.fs_frag > MAXFRAG) { errx(21, "fragment size %d is too small, minimum with block " "size %d is %d", sblock.fs_fsize, sblock.fs_bsize, sblock.fs_bsize / MAXFRAG); } sblock.fs_fragshift = ilog2(sblock.fs_frag); sblock.fs_fsbtodb = ilog2(sblock.fs_fsize / sectorsize); sblock.fs_size = dbtofsb(&sblock, fssize); sblock.fs_nspf = sblock.fs_fsize / sectorsize; sblock.fs_maxcontig = 1; sblock.fs_nrpos = 1; sblock.fs_cpg = 1; /* * Before the file system is fully initialized, mark it as invalid. */ sblock.fs_magic = FS_BAD_MAGIC; /* * Set the remaining superblock fields. Note that for FFS1, media * geometry fields are set to fake values. This is for compatibility * with really ancient kernels that might still inspect these values. 
*/ if (Oflag <= 1) { sblock.fs_sblockloc = SBLOCK_UFS1; sblock.fs_nindir = sblock.fs_bsize / sizeof(int32_t); sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs1_dinode); if (Oflag == 0) { sblock.fs_maxsymlinklen = 0; sblock.fs_inodefmt = FS_42INODEFMT; } else { sblock.fs_maxsymlinklen = MAXSYMLINKLEN_UFS1; sblock.fs_inodefmt = FS_44INODEFMT; } sblock.fs_cgoffset = 0; sblock.fs_cgmask = 0xffffffff; sblock.fs_ffs1_size = sblock.fs_size; sblock.fs_rotdelay = 0; sblock.fs_rps = 60; sblock.fs_interleave = 1; sblock.fs_trackskew = 0; sblock.fs_cpc = 0; } else { sblock.fs_inodefmt = FS_44INODEFMT; sblock.fs_sblockloc = SBLOCK_UFS2; sblock.fs_nindir = sblock.fs_bsize / sizeof(int64_t); sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs2_dinode); sblock.fs_maxsymlinklen = MAXSYMLINKLEN_UFS2; } sblock.fs_sblkno = roundup(howmany(sblock.fs_sblockloc + SBLOCKSIZE, sblock.fs_fsize), sblock.fs_frag); sblock.fs_cblkno = (int32_t)(sblock.fs_sblkno + roundup(howmany(SBSIZE, sblock.fs_fsize), sblock.fs_frag)); sblock.fs_iblkno = sblock.fs_cblkno + sblock.fs_frag; sblock.fs_maxfilesize = sblock.fs_bsize * NDADDR - 1; for (sizepb = sblock.fs_bsize, i = 0; i < NIADDR; i++) { sizepb *= NINDIR(&sblock); sblock.fs_maxfilesize += sizepb; } #ifdef notyet /* * It is impossible to create a snapshot in case fs_maxfilesize is * smaller than fssize. */ if (sblock.fs_maxfilesize < (u_quad_t)fssize) warnx("WARNING: You will be unable to create snapshots on this " "file system. Correct by using a larger blocksize."); #endif /* * Calculate the number of blocks to put into each cylinder group. The * first goal is to have at least enough data blocks in each cylinder * group to meet the density requirement. Once this goal is achieved * we try to expand to have at least mincylgrps cylinder groups. Once * this goal is achieved, we pack as many blocks into each cylinder * group map as will fit. * * We start by calculating the smallest number of blocks that we can * put into each cylinder group. If this is too big, we reduce the * density until it fits. */ origdensity = density; for (;;) { fragsperinode = MAX(numfrags(&sblock, density), 1); minfpg = fragsperinode * INOPB(&sblock); if (minfpg > sblock.fs_size) minfpg = sblock.fs_size; sblock.fs_ipg = INOPB(&sblock); sblock.fs_fpg = roundup(sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag); if (sblock.fs_fpg < minfpg) sblock.fs_fpg = minfpg; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); sblock.fs_fpg = roundup(sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag); if (sblock.fs_fpg < minfpg) sblock.fs_fpg = minfpg; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize) break; density -= sblock.fs_fsize; } if (density != origdensity) warnx("density reduced from %d to %d bytes per inode", origdensity, density); /* * Use a lower value for mincylgrps if the user specified a large * number of blocks per cylinder group. This is needed for, e.g. the * install media which needs to pack 2 files very tightly. */ mincylgrps = MINCYLGRPS; if (maxfrgspercg != INT_MAX) { i = sblock.fs_size / maxfrgspercg; if (i < MINCYLGRPS) mincylgrps = i <= 0 ? 1 : i; } /* * Start packing more blocks into the cylinder group until it cannot * grow any larger, the number of cylinder groups drops below * mincylgrps, or we reach the requested size. 
*/ for (;;) { sblock.fs_fpg += sblock.fs_frag; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); if (sblock.fs_fpg > maxfrgspercg || sblock.fs_size / sblock.fs_fpg < mincylgrps || CGSIZE(&sblock) > (unsigned long)sblock.fs_bsize) break; } sblock.fs_fpg -= sblock.fs_frag; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); if (sblock.fs_fpg > maxfrgspercg) warnx("can't honour -c: minimum is %d", sblock.fs_fpg); /* * Check to be sure that the last cylinder group has enough blocks to * be viable. If it is too small, reduce the number of blocks per * cylinder group which will have the effect of moving more blocks into * the last cylinder group. */ optimalfpg = sblock.fs_fpg; for (;;) { sblock.fs_ncg = howmany(sblock.fs_size, sblock.fs_fpg); lastminfpg = roundup(sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag); if (sblock.fs_size < lastminfpg) errx(28, "file system size %jd < minimum size of %d", (intmax_t)sblock.fs_size, lastminfpg); if (sblock.fs_size % sblock.fs_fpg >= lastminfpg || sblock.fs_size % sblock.fs_fpg == 0) break; sblock.fs_fpg -= sblock.fs_frag; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); } if (optimalfpg != sblock.fs_fpg) warnx("reduced number of fragments per cylinder group from %d" " to %d to enlarge last cylinder group", optimalfpg, sblock.fs_fpg); /* * Back to filling superblock fields. */ if (Oflag <= 1) { sblock.fs_spc = sblock.fs_fpg * sblock.fs_nspf; sblock.fs_nsect = sblock.fs_spc; sblock.fs_npsect = sblock.fs_spc; sblock.fs_ncyl = sblock.fs_ncg; } sblock.fs_cgsize = fragroundup(&sblock, CGSIZE(&sblock)); sblock.fs_dblkno = sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock); sblock.fs_csaddr = cgdmin(&sblock, 0); sblock.fs_cssize = fragroundup(&sblock, sblock.fs_ncg * sizeof(struct csum)); fscs = (struct csum *)calloc(1, sblock.fs_cssize); if (fscs == NULL) errx(31, "calloc failed"); sblock.fs_sbsize = fragroundup(&sblock, sizeof(struct fs)); if (sblock.fs_sbsize > SBLOCKSIZE) sblock.fs_sbsize = SBLOCKSIZE; sblock.fs_minfree = minfree; sblock.fs_maxbpg = maxbpg; sblock.fs_optim = opt; sblock.fs_cgrotor = 0; sblock.fs_pendingblocks = 0; sblock.fs_pendinginodes = 0; sblock.fs_fmod = 0; sblock.fs_ronly = 0; sblock.fs_state = 0; sblock.fs_clean = 1; sblock.fs_id[0] = (u_int32_t)utime; sblock.fs_id[1] = (u_int32_t)arc4random(); sblock.fs_fsmnt[0] = '\0'; csfrags = howmany(sblock.fs_cssize, sblock.fs_fsize); sblock.fs_dsize = sblock.fs_size - sblock.fs_sblkno - sblock.fs_ncg * (sblock.fs_dblkno - sblock.fs_sblkno); sblock.fs_cstotal.cs_nbfree = fragstoblks(&sblock, sblock.fs_dsize) - howmany(csfrags, sblock.fs_frag); sblock.fs_cstotal.cs_nffree = fragnum(&sblock, sblock.fs_size) + (fragnum(&sblock, csfrags) > 0 ? sblock.fs_frag - fragnum(&sblock, csfrags) : 0); sblock.fs_cstotal.cs_nifree = sblock.fs_ncg * sblock.fs_ipg - ROOTINO; sblock.fs_cstotal.cs_ndir = 0; sblock.fs_dsize -= csfrags; sblock.fs_time = utime; if (Oflag <= 1) { sblock.fs_ffs1_time = sblock.fs_time; sblock.fs_ffs1_dsize = sblock.fs_dsize; sblock.fs_ffs1_csaddr = sblock.fs_csaddr; sblock.fs_ffs1_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir; sblock.fs_ffs1_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree; sblock.fs_ffs1_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree; sblock.fs_ffs1_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree; } /* * Dump out summary information about file system. 
*/ if (!mfs) { #define B2MBFACTOR (1 / (1024.0 * 1024.0)) printf("%s: %.1fMB in %jd sectors of %d bytes\n", fsys, (float)sblock.fs_size * sblock.fs_fsize * B2MBFACTOR, (intmax_t)fsbtodb(&sblock, sblock.fs_size), sectorsize); printf("%d cylinder groups of %.2fMB, %d blocks, %d" " inodes each\n", sblock.fs_ncg, (float)sblock.fs_fpg * sblock.fs_fsize * B2MBFACTOR, sblock.fs_fpg / sblock.fs_frag, sblock.fs_ipg); #undef B2MBFACTOR } /* * Wipe out old FFS1 superblock if necessary. */ if (Oflag >= 2) { union fs_u *fsun1; struct fs *fs1; fsun1 = calloc(1, sizeof(union fs_u)); if (fsun1 == NULL) err(39, "calloc"); fs1 = &fsun1->fs; rdfs(SBLOCK_UFS1 / sectorsize, SBSIZE, (char *)fs1); if (fs1->fs_magic == FS_UFS1_MAGIC) { fs1->fs_magic = FS_BAD_MAGIC; wtfs(SBLOCK_UFS1 / sectorsize, SBSIZE, (char *)fs1); } free(fsun1); } wtfs((int)sblock.fs_sblockloc / sectorsize, SBSIZE, (char *)&sblock); sblock.fs_magic = (Oflag <= 1) ? FS_UFS1_MAGIC : FS_UFS2_MAGIC; /* * Now build the cylinder group blocks and * then print out indices of cylinder groups. */ if (!quiet) printf("super-block backups (for fsck -b #) at:\n"); #ifndef STANDALONE else if (!mfs && isatty(STDIN_FILENO)) { signal(SIGINFO, siginfo); cur_fsys = fsys; } #endif i = 0; width = charsperline(); /* * Allocate space for superblock, cylinder group map, and two sets of * inode blocks. */ if (sblock.fs_bsize < SBLOCKSIZE) iobufsize = SBLOCKSIZE + 3 * sblock.fs_bsize; else iobufsize = 4 * sblock.fs_bsize; if ((iobuf = malloc(iobufsize)) == 0) errx(38, "cannot allocate I/O buffer"); bzero(iobuf, iobufsize); /* * Make a copy of the superblock into the buffer that we will be * writing out in each cylinder group. */ bcopy((char *)&sblock, iobuf, SBLOCKSIZE); for (cylno = 0; cylno < sblock.fs_ncg; cylno++) { cur_cylno = (sig_atomic_t)cylno; initcg(cylno, utime); if (quiet) continue; j = snprintf(tmpbuf, sizeof tmpbuf, " %lld,", fsbtodb(&sblock, cgsblock(&sblock, cylno))); if (j >= sizeof tmpbuf) j = sizeof tmpbuf - 1; if (j == -1 || i+j >= width) { printf("\n"); i = 0; } i += j; printf("%s", tmpbuf); fflush(stdout); } if (!quiet) printf("\n"); if (Nflag && !mfs) exit(0); /* * Now construct the initial file system, then write out the superblock. */ if (Oflag <= 1) { if (fsinit1(utime, mfsmode, mfsuid, mfsgid)) errx(32, "fsinit1 failed"); sblock.fs_ffs1_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir; sblock.fs_ffs1_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree; sblock.fs_ffs1_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree; sblock.fs_ffs1_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree; } else { if (fsinit2(utime)) errx(32, "fsinit2 failed"); } wtfs((int)sblock.fs_sblockloc / sectorsize, SBSIZE, (char *)&sblock); for (i = 0; i < sblock.fs_cssize; i += sblock.fs_bsize) wtfs(fsbtodb(&sblock, sblock.fs_csaddr + numfrags(&sblock, i)), sblock.fs_cssize - i < sblock.fs_bsize ? sblock.fs_cssize - i : sblock.fs_bsize, ((char *)fscs) + i); /* * Update information about this partition in pack label, so that it may * be updated on disk. */ pp->p_fstype = FS_BSDFFS; pp->p_fragblock = DISKLABELV1_FFS_FRAGBLOCK(sblock.fs_fsize, sblock.fs_frag); pp->p_cpg = sblock.fs_cpg; }
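/*
 * The bitmask and shift fields computed in mkfs() above follow directly from
 * the two power-of-two sizes. A minimal sketch with hypothetical sizes
 * (16 KB block, 2 KB fragment); ilog2() is open-coded here since this is not
 * linked against the newfs sources.
 */
#include <stdint.h>
#include <stdio.h>

static int
my_ilog2(uint64_t v)
{
	int n = 0;

	while (v > 1) {
		v >>= 1;
		n++;
	}
	return (n);
}

int
main(void)
{
	uint64_t bsize = 16384, fsize = 2048;

	printf("frag=%llu bshift=%d fshift=%d fragshift=%d bmask=%#llx\n",
	    (unsigned long long)(bsize / fsize), my_ilog2(bsize),
	    my_ilog2(fsize), my_ilog2(bsize / fsize),
	    (unsigned long long)~(bsize - 1));
	return (0);
}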
/* * ffsinfo(8) is a tool to dump all metadata of a filesystem. It helps to find * errors in the filesystem much more easily. You can run ffsinfo before and after * an fsck(8), compare the two ASCII dumps easily with diff, and see * directly where the problem is. You can control how much detail you want to * see with some command line arguments. You can also easily check the status * of a filesystem, e.g. whether there is enough space for growing a filesystem, * or how many active snapshots it has. It provides much more detailed * information than dumpfs. Snapshots, as they are very new, are not really * supported. They are just mentioned currently, but it is planned to also run * over active snapshots, to get that output as well. */ int main(int argc, char **argv) { char *device, *special; char ch; size_t len; struct stat st; struct partinfo pinfo; int fsi; struct csum *dbg_csp; int dbg_csc; char dbg_line[80]; int cylno,i; int cfg_cg, cfg_in, cfg_lv; int cg_start, cg_stop; ino_t in; char *out_file = NULL; int Lflag=0; DBG_ENTER; cfg_lv=0xff; cfg_in=-2; cfg_cg=-2; while ((ch=getopt(argc, argv, "Lg:i:l:o:")) != -1) { switch(ch) { case 'L': Lflag=1; break; case 'g': cfg_cg=atol(optarg); if(cfg_cg < -1) { usage(); } break; case 'i': cfg_in=atol(optarg); if(cfg_in < 0) { usage(); } break; case 'l': cfg_lv=atol(optarg); if(cfg_lv < 0x1||cfg_lv > 0x3ff) { usage(); } break; case 'o': if (out_file) free(out_file); out_file = strdup(optarg); break; case '?': /* FALLTHROUGH */ default: usage(); } } argc -= optind; argv += optind; if(argc != 1) { usage(); } device=*argv; /* * Now we try to guess the (raw) device name. */ if (0 == strrchr(device, '/') && (stat(device, &st) == -1)) { /* * No path prefix was given, so try in that order: * /dev/r%s * /dev/%s * /dev/vinum/r%s * /dev/vinum/%s. * * FreeBSD no longer distinguishes between raw and block * devices, but it should still work this way. */ len=strlen(device)+strlen(_PATH_DEV)+2+strlen("vinum/"); special=(char *)malloc(len); if(special == NULL) { errx(1, "malloc failed"); } snprintf(special, len, "%sr%s", _PATH_DEV, device); if (stat(special, &st) == -1) { snprintf(special, len, "%s%s", _PATH_DEV, device); if (stat(special, &st) == -1) { snprintf(special, len, "%svinum/r%s", _PATH_DEV, device); if (stat(special, &st) == -1) { /* * For now this is the 'last resort'. */ snprintf(special, len, "%svinum/%s", _PATH_DEV, device); } } } device = special; } /* * Open our device for reading. */ fsi = open(device, O_RDONLY); if (fsi < 0) { err(1, "%s", device); } stat(device, &st); if(S_ISREG(st.st_mode)) { /* label check not supported for files */ Lflag=1; } if(!Lflag) { /* * Try to read a label and guess the slice if not specified. * This code should guess the right thing and avoid bothering * the user with the task of specifying the option -v on * vinum volumes. */ if (ioctl(fsi, DIOCGPART, &pinfo) < 0) { pinfo.media_size = st.st_size; pinfo.media_blksize = DEV_BSIZE; pinfo.media_blocks = pinfo.media_size / DEV_BSIZE; } /* * Check if that partition looks suitable for dumping. */ if (pinfo.media_size == 0) { errx(1, "partition is unavailable"); } } /* * Read the current superblock. */ rdfs((daddr_t)(SBOFF/DEV_BSIZE), (size_t)SBSIZE, &sblock, fsi); if (sblock.fs_magic != FS_MAGIC) { errx(1, "superblock not recognized"); } DBG_OPEN(out_file); /* we already need a superblock at this point */ if(cfg_lv & 0x001) { DBG_DUMP_FS(&sblock, "primary sblock"); } /* * Determine which cylinder groups to dump. 
*/ if(cfg_cg==-2) { cg_start=0; cg_stop=sblock.fs_ncg; } else if (cfg_cg==-1) { cg_start=sblock.fs_ncg-1; cg_stop=sblock.fs_ncg; } else if (cfg_cg<sblock.fs_ncg) { cg_start=cfg_cg; cg_stop=cfg_cg+1; } else { cg_start=sblock.fs_ncg; cg_stop=sblock.fs_ncg; } if (cfg_lv & 0x004) { fscs = (struct csum *)calloc((size_t)1, (size_t)sblock.fs_cssize); if(fscs == NULL) { errx(1, "calloc failed"); } /* * Get the cylinder summary into the memory ... */ for (i = 0; i < sblock.fs_cssize; i += sblock.fs_bsize) { rdfs(fsbtodb(&sblock, sblock.fs_csaddr + numfrags(&sblock, i)), (size_t)(sblock.fs_cssize-i< sblock.fs_bsize ? sblock.fs_cssize - i : sblock.fs_bsize), (void *)(((char *)fscs)+i), fsi); } dbg_csp=fscs; /* * ... and dump it. */ for(dbg_csc=0; dbg_csc<sblock.fs_ncg; dbg_csc++) { snprintf(dbg_line, sizeof(dbg_line), "%d. csum in fscs", dbg_csc); DBG_DUMP_CSUM(&sblock, dbg_line, dbg_csp++); } } /* * For each requested cylinder group ... */ for(cylno=cg_start; cylno<cg_stop; cylno++) { snprintf(dbg_line, sizeof(dbg_line), "cgr %d", cylno); if(cfg_lv & 0x002) { /* * ... dump the superblock copies ... */ rdfs(fsbtodb(&sblock, cgsblock(&sblock, cylno)), (size_t)SBSIZE, &osblock, fsi); DBG_DUMP_FS(&osblock, dbg_line); } /* * ... read the cylinder group and dump whatever was requested. */ rdfs(fsbtodb(&sblock, cgtod(&sblock, cylno)), (size_t)sblock.fs_cgsize, &acg, fsi); if(cfg_lv & 0x008) { DBG_DUMP_CG(&sblock, dbg_line, &acg); } if(cfg_lv & 0x010) { DBG_DUMP_INMAP(&sblock, dbg_line, &acg); } if(cfg_lv & 0x020) { DBG_DUMP_FRMAP(&sblock, dbg_line, &acg); } if(cfg_lv & 0x040) { DBG_DUMP_CLMAP(&sblock, dbg_line, &acg); DBG_DUMP_CLSUM(&sblock, dbg_line, &acg); } if(cfg_lv & 0x080) { DBG_DUMP_SPTBL(&sblock, dbg_line, &acg); } } /* * Dump the requested inode(s). */ if(cfg_in != -2) { dump_whole_inode((ino_t)cfg_in, fsi, cfg_lv); } else { for(in=cg_start*sblock.fs_ipg; in<(ino_t)cg_stop*sblock.fs_ipg; in++) { dump_whole_inode(in, fsi, cfg_lv); } } DBG_CLOSE; close(fsi); DBG_LEAVE; return 0; }
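/*
 * A small sketch of the offset arithmetic used above when the cylinder
 * summary area (fs_cssize bytes starting at fragment fs_csaddr) is read one
 * block at a time: numfrags() of the running byte offset picks the next
 * fragment, and the last chunk is clamped to what is left. The sizes here
 * (cssize 10000, bsize 8192, fsize 1024, csaddr 1000) are made up for
 * illustration.
 */
#include <stdio.h>

int
main(void)
{
	int cssize = 10000, bsize = 8192, fsize = 1024, csaddr = 1000, i;

	for (i = 0; i < cssize; i += bsize) {
		int frag = csaddr + i / fsize;		/* numfrags(&sblock, i) */
		int len = cssize - i < bsize ? cssize - i : bsize;

		printf("read %d bytes at fragment %d\n", len, frag);
	}
	return (0);
}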
/* * Truncate the inode oip to at most length size, freeing the * disk blocks. */ int ffs_truncate(struct vnode *ovp, off_t length, int ioflag, kauth_cred_t cred) { daddr_t lastblock; struct inode *oip = VTOI(ovp); daddr_t bn, lastiblock[NIADDR], indir_lbn[NIADDR]; daddr_t blks[NDADDR + NIADDR]; struct fs *fs; int offset, pgoffset, level; int64_t count, blocksreleased = 0; int i, aflag, nblocks; int error, allerror = 0; off_t osize; int sync; struct ufsmount *ump = oip->i_ump; if (ovp->v_type == VCHR || ovp->v_type == VBLK || ovp->v_type == VFIFO || ovp->v_type == VSOCK) { KASSERT(oip->i_size == 0); return 0; } if (length < 0) return (EINVAL); if (ovp->v_type == VLNK && (oip->i_size < ump->um_maxsymlinklen || (ump->um_maxsymlinklen == 0 && DIP(oip, blocks) == 0))) { KDASSERT(length == 0); memset(SHORTLINK(oip), 0, (size_t)oip->i_size); oip->i_size = 0; DIP_ASSIGN(oip, size, 0); oip->i_flag |= IN_CHANGE | IN_UPDATE; return (ffs_update(ovp, NULL, NULL, 0)); } if (oip->i_size == length) { /* still do a uvm_vnp_setsize() as writesize may be larger */ uvm_vnp_setsize(ovp, length); oip->i_flag |= IN_CHANGE | IN_UPDATE; return (ffs_update(ovp, NULL, NULL, 0)); } fs = oip->i_fs; if (length > ump->um_maxfilesize) return (EFBIG); if ((oip->i_flags & SF_SNAPSHOT) != 0) ffs_snapremove(ovp); osize = oip->i_size; aflag = ioflag & IO_SYNC ? B_SYNC : 0; /* * Lengthen the size of the file. We must ensure that the * last byte of the file is allocated. Since the smallest * value of osize is 0, length will be at least 1. */ if (osize < length) { if (lblkno(fs, osize) < NDADDR && lblkno(fs, osize) != lblkno(fs, length) && blkroundup(fs, osize) != osize) { off_t eob; eob = blkroundup(fs, osize); uvm_vnp_setwritesize(ovp, eob); error = ufs_balloc_range(ovp, osize, eob - osize, cred, aflag); if (error) { (void) ffs_truncate(ovp, osize, ioflag & IO_SYNC, cred); return error; } if (ioflag & IO_SYNC) { mutex_enter(ovp->v_interlock); VOP_PUTPAGES(ovp, trunc_page(osize & fs->fs_bmask), round_page(eob), PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED); } } uvm_vnp_setwritesize(ovp, length); error = ufs_balloc_range(ovp, length - 1, 1, cred, aflag); if (error) { (void) ffs_truncate(ovp, osize, ioflag & IO_SYNC, cred); return (error); } uvm_vnp_setsize(ovp, length); oip->i_flag |= IN_CHANGE | IN_UPDATE; KASSERT(ovp->v_size == oip->i_size); return (ffs_update(ovp, NULL, NULL, 0)); } /* * When truncating a regular file down to a non-block-aligned size, * we must zero the part of last block which is past the new EOF. * We must synchronously flush the zeroed pages to disk * since the new pages will be invalidated as soon as we * inform the VM system of the new, smaller size. * We must do this before acquiring the GLOCK, since fetching * the pages will acquire the GLOCK internally. * So there is a window where another thread could see a whole * zeroed page past EOF, but that's life. 
*/ offset = blkoff(fs, length); pgoffset = length & PAGE_MASK; if (ovp->v_type == VREG && (pgoffset != 0 || offset != 0) && osize > length) { daddr_t lbn; voff_t eoz; int size; if (offset != 0) { error = ufs_balloc_range(ovp, length - 1, 1, cred, aflag); if (error) return error; } lbn = lblkno(fs, length); size = blksize(fs, oip, lbn); eoz = MIN(MAX(lblktosize(fs, lbn) + size, round_page(pgoffset)), osize); ubc_zerorange(&ovp->v_uobj, length, eoz - length, UBC_UNMAP_FLAG(ovp)); if (round_page(eoz) > round_page(length)) { mutex_enter(ovp->v_interlock); error = VOP_PUTPAGES(ovp, round_page(length), round_page(eoz), PGO_CLEANIT | PGO_DEACTIVATE | PGO_JOURNALLOCKED | ((ioflag & IO_SYNC) ? PGO_SYNCIO : 0)); if (error) return error; } } genfs_node_wrlock(ovp); oip->i_size = length; DIP_ASSIGN(oip, size, length); uvm_vnp_setsize(ovp, length); /* * Calculate index into inode's block list of * last direct and indirect blocks (if any) * which we want to keep. Lastblock is -1 when * the file is truncated to 0. */ lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1; lastiblock[SINGLE] = lastblock - NDADDR; lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs); lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs); nblocks = btodb(fs->fs_bsize); /* * Update file and block pointers on disk before we start freeing * blocks. If we crash before free'ing blocks below, the blocks * will be returned to the free list. lastiblock values are also * normalized to -1 for calls to ffs_indirtrunc below. */ sync = 0; for (level = TRIPLE; level >= SINGLE; level--) { blks[NDADDR + level] = DIP(oip, ib[level]); if (lastiblock[level] < 0 && blks[NDADDR + level] != 0) { sync = 1; DIP_ASSIGN(oip, ib[level], 0); lastiblock[level] = -1; } } for (i = 0; i < NDADDR; i++) { blks[i] = DIP(oip, db[i]); if (i > lastblock && blks[i] != 0) { sync = 1; DIP_ASSIGN(oip, db[i], 0); } } oip->i_flag |= IN_CHANGE | IN_UPDATE; if (sync) { error = ffs_update(ovp, NULL, NULL, UPDATE_WAIT); if (error && !allerror) allerror = error; } /* * Having written the new inode to disk, save its new configuration * and put back the old block pointers long enough to process them. * Note that we save the new block configuration so we can check it * when we are done. */ for (i = 0; i < NDADDR; i++) { bn = DIP(oip, db[i]); DIP_ASSIGN(oip, db[i], blks[i]); blks[i] = bn; } for (i = 0; i < NIADDR; i++) { bn = DIP(oip, ib[i]); DIP_ASSIGN(oip, ib[i], blks[NDADDR + i]); blks[NDADDR + i] = bn; } oip->i_size = osize; DIP_ASSIGN(oip, size, osize); error = vtruncbuf(ovp, lastblock + 1, 0, 0); if (error && !allerror) allerror = error; /* * Indirect blocks first. */ indir_lbn[SINGLE] = -NDADDR; indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1; indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1; for (level = TRIPLE; level >= SINGLE; level--) { if (oip->i_ump->um_fstype == UFS1) bn = ufs_rw32(oip->i_ffs1_ib[level],UFS_FSNEEDSWAP(fs)); else bn = ufs_rw64(oip->i_ffs2_ib[level],UFS_FSNEEDSWAP(fs)); if (bn != 0) { error = ffs_indirtrunc(oip, indir_lbn[level], fsbtodb(fs, bn), lastiblock[level], level, &count); if (error) allerror = error; blocksreleased += count; if (lastiblock[level] < 0) { DIP_ASSIGN(oip, ib[level], 0); if (oip->i_ump->um_mountp->mnt_wapbl) { UFS_WAPBL_REGISTER_DEALLOCATION( oip->i_ump->um_mountp, fsbtodb(fs, bn), fs->fs_bsize); } else ffs_blkfree(fs, oip->i_devvp, bn, fs->fs_bsize, oip->i_number); blocksreleased += nblocks; } } if (lastiblock[level] >= 0) goto done; } /* * All whole direct blocks or frags. 
*/ for (i = NDADDR - 1; i > lastblock; i--) { long bsize; if (oip->i_ump->um_fstype == UFS1) bn = ufs_rw32(oip->i_ffs1_db[i], UFS_FSNEEDSWAP(fs)); else bn = ufs_rw64(oip->i_ffs2_db[i], UFS_FSNEEDSWAP(fs)); if (bn == 0) continue; DIP_ASSIGN(oip, db[i], 0); bsize = blksize(fs, oip, i); if ((oip->i_ump->um_mountp->mnt_wapbl) && (ovp->v_type != VREG)) { UFS_WAPBL_REGISTER_DEALLOCATION(oip->i_ump->um_mountp, fsbtodb(fs, bn), bsize); } else ffs_blkfree(fs, oip->i_devvp, bn, bsize, oip->i_number); blocksreleased += btodb(bsize); } if (lastblock < 0) goto done; /* * Finally, look for a change in size of the * last direct block; release any frags. */ if (oip->i_ump->um_fstype == UFS1) bn = ufs_rw32(oip->i_ffs1_db[lastblock], UFS_FSNEEDSWAP(fs)); else bn = ufs_rw64(oip->i_ffs2_db[lastblock], UFS_FSNEEDSWAP(fs)); if (bn != 0) { long oldspace, newspace; /* * Calculate amount of space we're giving * back as old block size minus new block size. */ oldspace = blksize(fs, oip, lastblock); oip->i_size = length; DIP_ASSIGN(oip, size, length); newspace = blksize(fs, oip, lastblock); if (newspace == 0) panic("itrunc: newspace"); if (oldspace - newspace > 0) { /* * Block number of space to be free'd is * the old block # plus the number of frags * required for the storage we're keeping. */ bn += numfrags(fs, newspace); if ((oip->i_ump->um_mountp->mnt_wapbl) && (ovp->v_type != VREG)) { UFS_WAPBL_REGISTER_DEALLOCATION( oip->i_ump->um_mountp, fsbtodb(fs, bn), oldspace - newspace); } else ffs_blkfree(fs, oip->i_devvp, bn, oldspace - newspace, oip->i_number); blocksreleased += btodb(oldspace - newspace); } } done: #ifdef DIAGNOSTIC for (level = SINGLE; level <= TRIPLE; level++) if (blks[NDADDR + level] != DIP(oip, ib[level])) panic("itrunc1"); for (i = 0; i < NDADDR; i++) if (blks[i] != DIP(oip, db[i])) panic("itrunc2"); if (length == 0 && (!LIST_EMPTY(&ovp->v_cleanblkhd) || !LIST_EMPTY(&ovp->v_dirtyblkhd))) panic("itrunc3"); #endif /* DIAGNOSTIC */ /* * Put back the real size. */ oip->i_size = length; DIP_ASSIGN(oip, size, length); DIP_ADD(oip, blocks, -blocksreleased); genfs_node_unlock(ovp); oip->i_flag |= IN_CHANGE; UFS_WAPBL_UPDATE(ovp, NULL, NULL, 0); #if defined(QUOTA) || defined(QUOTA2) (void) chkdq(oip, -blocksreleased, NOCRED, 0); #endif KASSERT(ovp->v_type != VREG || ovp->v_size == oip->i_size); return (allerror); }
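/*
 * A sketch of two conventions the FFS truncate path above relies on:
 * indirect blocks live at negative logical block numbers (the indir_lbn[]
 * values passed to ffs_indirtrunc), and freed space is accounted in
 * DEV_BSIZE (512-byte) units via btodb(). The parameters (NDADDR 12,
 * NINDIR 2048, 16 KB blocks) are assumed, not read from a real superblock.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const int64_t ndaddr = 12, nindir = 2048, bsize = 16384;
	int64_t indir_single = -ndaddr;
	int64_t indir_double = indir_single - nindir - 1;
	int64_t indir_triple = indir_double - nindir * nindir - 1;
	int64_t nblocks = bsize / 512;		/* btodb(fs_bsize) */

	printf("indirect lbns: %lld %lld %lld; one block frees %lld sectors\n",
	    (long long)indir_single, (long long)indir_double,
	    (long long)indir_triple, (long long)nblocks);
	return (0);
}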
/* * Truncate the inode oip to at most length size, freeing the * disk blocks. */ int ffs_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred) { struct vnode *ovp = vp; ufs_daddr_t lastblock; struct inode *oip; ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR]; ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR]; struct fs *fs; struct buf *bp; int offset, size, level; long count, nblocks, blocksreleased = 0; int i; int aflags, error, allerror; off_t osize; oip = VTOI(ovp); fs = oip->i_fs; if (length < 0) return (EINVAL); if (length > fs->fs_maxfilesize) return (EFBIG); if (ovp->v_type == VLNK && (oip->i_size < ovp->v_mount->mnt_maxsymlinklen || oip->i_din.di_blocks == 0)) { #ifdef DIAGNOSTIC if (length != 0) panic("ffs_truncate: partial truncate of symlink"); #endif /* DIAGNOSTIC */ bzero((char *)&oip->i_shortlink, (uint)oip->i_size); oip->i_size = 0; oip->i_flag |= IN_CHANGE | IN_UPDATE; return (ffs_update(ovp, 1)); } if (oip->i_size == length) { oip->i_flag |= IN_CHANGE | IN_UPDATE; return (ffs_update(ovp, 0)); } if (fs->fs_ronly) panic("ffs_truncate: read-only filesystem"); #ifdef QUOTA error = ufs_getinoquota(oip); if (error) return (error); #endif ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0; if (DOINGSOFTDEP(ovp)) { if (length > 0 || softdep_slowdown(ovp)) { /* * If a file is only partially truncated, then * we have to clean up the data structures * describing the allocation past the truncation * point. Finding and deallocating those structures * is a lot of work. Since partial truncation occurs * rarely, we solve the problem by syncing the file * so that it will have no data structures left. */ if ((error = VOP_FSYNC(ovp, MNT_WAIT, 0)) != 0) return (error); } else { #ifdef QUOTA (void) ufs_chkdq(oip, -oip->i_blocks, NOCRED, 0); #endif softdep_setup_freeblocks(oip, length); vinvalbuf(ovp, 0, 0, 0); nvnode_pager_setsize(ovp, 0, fs->fs_bsize, 0); oip->i_flag |= IN_CHANGE | IN_UPDATE; return (ffs_update(ovp, 0)); } } osize = oip->i_size; /* * Lengthen the size of the file. We must ensure that the * last byte of the file is allocated. Since the smallest * value of osize is 0, length will be at least 1. * * nvextendbuf() only breads the old buffer. The blocksize * of the new buffer must be specified so it knows how large * to make the VM object. */ if (osize < length) { nvextendbuf(vp, osize, length, blkoffsize(fs, oip, osize), /* oblksize */ blkoffresize(fs, length), /* nblksize */ blkoff(fs, osize), blkoff(fs, length), 0); aflags = B_CLRBUF; if (flags & IO_SYNC) aflags |= B_SYNC; /* BALLOC will reallocate the fragment at the old EOF */ error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp); if (error) return (error); oip->i_size = length; if (bp->b_bufsize == fs->fs_bsize) bp->b_flags |= B_CLUSTEROK; if (aflags & B_SYNC) bwrite(bp); else bawrite(bp); oip->i_flag |= IN_CHANGE | IN_UPDATE; return (ffs_update(ovp, 1)); } /* * Shorten the size of the file. * * NOTE: The block size specified in nvtruncbuf() is the blocksize * of the buffer containing length prior to any reallocation * of the block. 
*/ allerror = nvtruncbuf(ovp, length, blkoffsize(fs, oip, length), blkoff(fs, length), 0); offset = blkoff(fs, length); if (offset == 0) { oip->i_size = length; } else { lbn = lblkno(fs, length); aflags = B_CLRBUF; if (flags & IO_SYNC) aflags |= B_SYNC; error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp); if (error) return (error); /* * When we are doing soft updates and the UFS_BALLOC * above fills in a direct block hole with a full sized * block that will be truncated down to a fragment below, * we must flush out the block dependency with an FSYNC * so that we do not get a soft updates inconsistency * when we create the fragment below. * * nvtruncbuf() may have re-dirtied the underlying block * as part of its truncation zeroing code. To avoid a * 'locking against myself' panic in the second fsync we * can simply undirty the bp since the redirtying was * related to areas of the buffer that we are going to * throw away anyway, and we will b*write() the remainder * anyway down below. */ if (DOINGSOFTDEP(ovp) && lbn < NDADDR && fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize) { bundirty(bp); error = VOP_FSYNC(ovp, MNT_WAIT, 0); if (error) { bdwrite(bp); return (error); } } oip->i_size = length; size = blksize(fs, oip, lbn); #if 0 /* remove - nvtruncbuf deals with this */ if (ovp->v_type != VDIR) bzero((char *)bp->b_data + offset, (uint)(size - offset)); #endif /* Kirk's code has reallocbuf(bp, size, 1) here */ allocbuf(bp, size); if (bp->b_bufsize == fs->fs_bsize) bp->b_flags |= B_CLUSTEROK; if (aflags & B_SYNC) bwrite(bp); else bawrite(bp); } /* * Calculate index into inode's block list of * last direct and indirect blocks (if any) * which we want to keep. Lastblock is -1 when * the file is truncated to 0. */ lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1; lastiblock[SINGLE] = lastblock - NDADDR; lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs); lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs); nblocks = btodb(fs->fs_bsize); /* * Update file and block pointers on disk before we start freeing * blocks. If we crash before free'ing blocks below, the blocks * will be returned to the free list. lastiblock values are also * normalized to -1 for calls to ffs_indirtrunc below. */ bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks); for (level = TRIPLE; level >= SINGLE; level--) if (lastiblock[level] < 0) { oip->i_ib[level] = 0; lastiblock[level] = -1; } for (i = NDADDR - 1; i > lastblock; i--) oip->i_db[i] = 0; oip->i_flag |= IN_CHANGE | IN_UPDATE; error = ffs_update(ovp, 1); if (error && allerror == 0) allerror = error; /* * Having written the new inode to disk, save its new configuration * and put back the old block pointers long enough to process them. * Note that we save the new block configuration so we can check it * when we are done. */ bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks); bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks); oip->i_size = osize; if (error && allerror == 0) allerror = error; /* * Indirect blocks first. 
*/ indir_lbn[SINGLE] = -NDADDR; indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1; indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1; for (level = TRIPLE; level >= SINGLE; level--) { bn = oip->i_ib[level]; if (bn != 0) { error = ffs_indirtrunc(oip, indir_lbn[level], fsbtodb(fs, bn), lastiblock[level], level, &count); if (error) allerror = error; blocksreleased += count; if (lastiblock[level] < 0) { oip->i_ib[level] = 0; ffs_blkfree(oip, bn, fs->fs_bsize); blocksreleased += nblocks; } } if (lastiblock[level] >= 0) goto done; } /* * All whole direct blocks or frags. */ for (i = NDADDR - 1; i > lastblock; i--) { long bsize; bn = oip->i_db[i]; if (bn == 0) continue; oip->i_db[i] = 0; bsize = blksize(fs, oip, i); ffs_blkfree(oip, bn, bsize); blocksreleased += btodb(bsize); } if (lastblock < 0) goto done; /* * Finally, look for a change in size of the * last direct block; release any frags. */ bn = oip->i_db[lastblock]; if (bn != 0) { long oldspace, newspace; /* * Calculate amount of space we're giving * back as old block size minus new block size. */ oldspace = blksize(fs, oip, lastblock); oip->i_size = length; newspace = blksize(fs, oip, lastblock); if (newspace == 0) panic("ffs_truncate: newspace"); if (oldspace - newspace > 0) { /* * Block number of space to be free'd is * the old block # plus the number of frags * required for the storage we're keeping. */ bn += numfrags(fs, newspace); ffs_blkfree(oip, bn, oldspace - newspace); blocksreleased += btodb(oldspace - newspace); } } done: #ifdef DIAGNOSTIC for (level = SINGLE; level <= TRIPLE; level++) if (newblks[NDADDR + level] != oip->i_ib[level]) panic("ffs_truncate1"); for (i = 0; i < NDADDR; i++) if (newblks[i] != oip->i_db[i]) panic("ffs_truncate2"); if (length == 0 && !RB_EMPTY(&ovp->v_rbdirty_tree)) panic("ffs_truncate3"); #endif /* DIAGNOSTIC */ /* * Put back the real size. */ oip->i_size = length; oip->i_blocks -= blocksreleased; if (oip->i_blocks < 0) /* sanity */ oip->i_blocks = 0; oip->i_flag |= IN_CHANGE; #ifdef QUOTA (void) ufs_chkdq(oip, -blocksreleased, NOCRED, 0); #endif return (allerror); }
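/*
 * A standalone sketch of the "release any frags" arithmetic at the end of
 * the truncate routines above: the last direct block keeps
 * numfrags(newspace) fragments, so the freed run starts that many fragments
 * into the old block and is oldspace - newspace bytes long. The fragment and
 * block sizes (4 KB and 32 KB) are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const int64_t fsize = 4096, bsize = 32768;
	int64_t oldspace = bsize;		/* last block was full */
	int64_t newspace = 2 * fsize;		/* new EOF keeps two frags */
	int64_t bn = 5000;			/* example fragment number */

	if (oldspace - newspace > 0) {
		int64_t freebn = bn + newspace / fsize;	/* numfrags(newspace) */

		printf("free %lld bytes starting at fragment %lld\n",
		    (long long)(oldspace - newspace), (long long)freebn);
	}
	return (0);
}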
void mkfs(struct partition *pp, char *fsys) { int fragsperinode, optimalfpg, origdensity, minfpg, lastminfpg; long i, j, csfrags; uint cg; time_t utime; quad_t sizepb; int width; ino_t maxinum; int minfragsperinode; /* minimum ratio of frags to inodes */ char tmpbuf[100]; /* XXX this will break in about 2,500 years */ union { struct fs fdummy; char cdummy[SBLOCKSIZE]; } dummy; #define fsdummy dummy.fdummy #define chdummy dummy.cdummy /* * Our blocks == sector size, and the version of UFS we are using is * specified by Oflag. */ disk.d_bsize = sectorsize; disk.d_ufs = Oflag; if (Rflag) { utime = 1000000000; } else { time(&utime); arc4random_stir(); } sblock.fs_old_flags = FS_FLAGS_UPDATED; sblock.fs_flags = 0; if (Uflag) sblock.fs_flags |= FS_DOSOFTDEP; if (Lflag) strlcpy(sblock.fs_volname, volumelabel, MAXVOLLEN); if (Jflag) sblock.fs_flags |= FS_GJOURNAL; if (lflag) sblock.fs_flags |= FS_MULTILABEL; if (tflag) sblock.fs_flags |= FS_TRIM; /* * Validate the given file system size. * Verify that its last block can actually be accessed. * Convert to file system fragment sized units. */ if (fssize <= 0) { printf("preposterous size %jd\n", (intmax_t)fssize); exit(13); } wtfs(fssize - (realsectorsize / DEV_BSIZE), realsectorsize, (char *)&sblock); /* * collect and verify the file system density info */ sblock.fs_avgfilesize = avgfilesize; sblock.fs_avgfpdir = avgfilesperdir; if (sblock.fs_avgfilesize <= 0) printf("illegal expected average file size %d\n", sblock.fs_avgfilesize), exit(14); if (sblock.fs_avgfpdir <= 0) printf("illegal expected number of files per directory %d\n", sblock.fs_avgfpdir), exit(15); restart: /* * collect and verify the block and fragment sizes */ sblock.fs_bsize = bsize; sblock.fs_fsize = fsize; if (!POWEROF2(sblock.fs_bsize)) { printf("block size must be a power of 2, not %d\n", sblock.fs_bsize); exit(16); } if (!POWEROF2(sblock.fs_fsize)) { printf("fragment size must be a power of 2, not %d\n", sblock.fs_fsize); exit(17); } if (sblock.fs_fsize < sectorsize) { printf("increasing fragment size from %d to sector size (%d)\n", sblock.fs_fsize, sectorsize); sblock.fs_fsize = sectorsize; } if (sblock.fs_bsize > MAXBSIZE) { printf("decreasing block size from %d to maximum (%d)\n", sblock.fs_bsize, MAXBSIZE); sblock.fs_bsize = MAXBSIZE; } if (sblock.fs_bsize < MINBSIZE) { printf("increasing block size from %d to minimum (%d)\n", sblock.fs_bsize, MINBSIZE); sblock.fs_bsize = MINBSIZE; } if (sblock.fs_fsize > MAXBSIZE) { printf("decreasing fragment size from %d to maximum (%d)\n", sblock.fs_fsize, MAXBSIZE); sblock.fs_fsize = MAXBSIZE; } if (sblock.fs_bsize < sblock.fs_fsize) { printf("increasing block size from %d to fragment size (%d)\n", sblock.fs_bsize, sblock.fs_fsize); sblock.fs_bsize = sblock.fs_fsize; } if (sblock.fs_fsize * MAXFRAG < sblock.fs_bsize) { printf( "increasing fragment size from %d to block size / %d (%d)\n", sblock.fs_fsize, MAXFRAG, sblock.fs_bsize / MAXFRAG); sblock.fs_fsize = sblock.fs_bsize / MAXFRAG; } if (maxbsize == 0) maxbsize = bsize; if (maxbsize < bsize || !POWEROF2(maxbsize)) { sblock.fs_maxbsize = sblock.fs_bsize; printf("Extent size set to %d\n", sblock.fs_maxbsize); } else if (sblock.fs_maxbsize > FS_MAXCONTIG * sblock.fs_bsize) { sblock.fs_maxbsize = FS_MAXCONTIG * sblock.fs_bsize; printf("Extent size reduced to %d\n", sblock.fs_maxbsize); } else { sblock.fs_maxbsize = maxbsize; } /* * Maxcontig sets the default for the maximum number of blocks * that may be allocated sequentially. 
With file system clustering * it is possible to allocate contiguous blocks up to the maximum * transfer size permitted by the controller or buffering. */ if (maxcontig == 0) maxcontig = MAX(1, MAXPHYS / bsize); sblock.fs_maxcontig = maxcontig; if (sblock.fs_maxcontig < sblock.fs_maxbsize / sblock.fs_bsize) { sblock.fs_maxcontig = sblock.fs_maxbsize / sblock.fs_bsize; printf("Maxcontig raised to %d\n", sblock.fs_maxbsize); } if (sblock.fs_maxcontig > 1) sblock.fs_contigsumsize = MIN(sblock.fs_maxcontig,FS_MAXCONTIG); sblock.fs_bmask = ~(sblock.fs_bsize - 1); sblock.fs_fmask = ~(sblock.fs_fsize - 1); sblock.fs_qbmask = ~sblock.fs_bmask; sblock.fs_qfmask = ~sblock.fs_fmask; sblock.fs_bshift = ilog2(sblock.fs_bsize); sblock.fs_fshift = ilog2(sblock.fs_fsize); sblock.fs_frag = numfrags(&sblock, sblock.fs_bsize); sblock.fs_fragshift = ilog2(sblock.fs_frag); if (sblock.fs_frag > MAXFRAG) { printf("fragment size %d is still too small (can't happen)\n", sblock.fs_bsize / MAXFRAG); exit(21); } sblock.fs_fsbtodb = ilog2(sblock.fs_fsize / sectorsize); sblock.fs_size = fssize = dbtofsb(&sblock, fssize); sblock.fs_providersize = dbtofsb(&sblock, mediasize / sectorsize); /* * Before the filesystem is finally initialized, mark it * as incompletely initialized. */ sblock.fs_magic = FS_BAD_MAGIC; if (Oflag == 1) { sblock.fs_sblockloc = SBLOCK_UFS1; sblock.fs_nindir = sblock.fs_bsize / sizeof(ufs1_daddr_t); sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs1_dinode); sblock.fs_maxsymlinklen = ((NDADDR + NIADDR) * sizeof(ufs1_daddr_t)); sblock.fs_old_inodefmt = FS_44INODEFMT; sblock.fs_old_cgoffset = 0; sblock.fs_old_cgmask = 0xffffffff; sblock.fs_old_size = sblock.fs_size; sblock.fs_old_rotdelay = 0; sblock.fs_old_rps = 60; sblock.fs_old_nspf = sblock.fs_fsize / sectorsize; sblock.fs_old_cpg = 1; sblock.fs_old_interleave = 1; sblock.fs_old_trackskew = 0; sblock.fs_old_cpc = 0; sblock.fs_old_postblformat = 1; sblock.fs_old_nrpos = 1; } else { sblock.fs_sblockloc = SBLOCK_UFS2; sblock.fs_nindir = sblock.fs_bsize / sizeof(ufs2_daddr_t); sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs2_dinode); sblock.fs_maxsymlinklen = ((NDADDR + NIADDR) * sizeof(ufs2_daddr_t)); } sblock.fs_sblkno = roundup(howmany(sblock.fs_sblockloc + SBLOCKSIZE, sblock.fs_fsize), sblock.fs_frag); sblock.fs_cblkno = sblock.fs_sblkno + roundup(howmany(SBLOCKSIZE, sblock.fs_fsize), sblock.fs_frag); sblock.fs_iblkno = sblock.fs_cblkno + sblock.fs_frag; sblock.fs_maxfilesize = sblock.fs_bsize * NDADDR - 1; for (sizepb = sblock.fs_bsize, i = 0; i < NIADDR; i++) { sizepb *= NINDIR(&sblock); sblock.fs_maxfilesize += sizepb; } /* * It's impossible to create a snapshot in case that fs_maxfilesize * is smaller than the fssize. */ if (sblock.fs_maxfilesize < (u_quad_t)fssize) { warnx("WARNING: You will be unable to create snapshots on this " "file system. Correct by using a larger blocksize."); } /* * Calculate the number of blocks to put into each cylinder group. * * This algorithm selects the number of blocks per cylinder * group. The first goal is to have at least enough data blocks * in each cylinder group to meet the density requirement. Once * this goal is achieved we try to expand to have at least * MINCYLGRPS cylinder groups. Once this goal is achieved, we * pack as many blocks into each cylinder group map as will fit. * * We start by calculating the smallest number of blocks that we * can put into each cylinder group. If this is too big, we reduce * the density until it fits. 
*/ maxinum = (((int64_t)(1)) << 32) - INOPB(&sblock); minfragsperinode = 1 + fssize / maxinum; if (density == 0) { density = MAX(NFPI, minfragsperinode) * fsize; } else if (density < minfragsperinode * fsize) { origdensity = density; density = minfragsperinode * fsize; fprintf(stderr, "density increased from %d to %d\n", origdensity, density); } origdensity = density; for (;;) { fragsperinode = MAX(numfrags(&sblock, density), 1); if (fragsperinode < minfragsperinode) { bsize <<= 1; fsize <<= 1; printf("Block size too small for a file system %s %d\n", "of this size. Increasing blocksize to", bsize); goto restart; } minfpg = fragsperinode * INOPB(&sblock); if (minfpg > sblock.fs_size) minfpg = sblock.fs_size; sblock.fs_ipg = INOPB(&sblock); sblock.fs_fpg = roundup(sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag); if (sblock.fs_fpg < minfpg) sblock.fs_fpg = minfpg; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); sblock.fs_fpg = roundup(sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag); if (sblock.fs_fpg < minfpg) sblock.fs_fpg = minfpg; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize) break; density -= sblock.fs_fsize; } if (density != origdensity) printf("density reduced from %d to %d\n", origdensity, density); /* * Start packing more blocks into the cylinder group until * it cannot grow any larger, the number of cylinder groups * drops below MINCYLGRPS, or we reach the size requested. * For UFS1 inodes per cylinder group are stored in an int16_t * so fs_ipg is limited to 2^15 - 1. */ for ( ; sblock.fs_fpg < maxblkspercg; sblock.fs_fpg += sblock.fs_frag) { sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); if (Oflag > 1 || (Oflag == 1 && sblock.fs_ipg <= 0x7fff)) { if (sblock.fs_size / sblock.fs_fpg < MINCYLGRPS) break; if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize) continue; if (CGSIZE(&sblock) == (unsigned long)sblock.fs_bsize) break; } sblock.fs_fpg -= sblock.fs_frag; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); break; } /* * Check to be sure that the last cylinder group has enough blocks * to be viable. If it is too small, reduce the number of blocks * per cylinder group which will have the effect of moving more * blocks into the last cylinder group. 
*/ optimalfpg = sblock.fs_fpg; for (;;) { sblock.fs_ncg = howmany(sblock.fs_size, sblock.fs_fpg); lastminfpg = roundup(sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag); if (sblock.fs_size < lastminfpg) { printf("Filesystem size %jd < minimum size of %d\n", (intmax_t)sblock.fs_size, lastminfpg); exit(28); } if (sblock.fs_size % sblock.fs_fpg >= lastminfpg || sblock.fs_size % sblock.fs_fpg == 0) break; sblock.fs_fpg -= sblock.fs_frag; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); } if (optimalfpg != sblock.fs_fpg) printf("Reduced frags per cylinder group from %d to %d %s\n", optimalfpg, sblock.fs_fpg, "to enlarge last cyl group"); sblock.fs_cgsize = fragroundup(&sblock, CGSIZE(&sblock)); sblock.fs_dblkno = sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock); if (Oflag == 1) { sblock.fs_old_spc = sblock.fs_fpg * sblock.fs_old_nspf; sblock.fs_old_nsect = sblock.fs_old_spc; sblock.fs_old_npsect = sblock.fs_old_spc; sblock.fs_old_ncyl = sblock.fs_ncg; } /* * fill in remaining fields of the super block */ sblock.fs_csaddr = cgdmin(&sblock, 0); sblock.fs_cssize = fragroundup(&sblock, sblock.fs_ncg * sizeof(struct csum)); fscs = (struct csum *)calloc(1, sblock.fs_cssize); if (fscs == NULL) errx(31, "calloc failed"); sblock.fs_sbsize = fragroundup(&sblock, sizeof(struct fs)); if (sblock.fs_sbsize > SBLOCKSIZE) sblock.fs_sbsize = SBLOCKSIZE; sblock.fs_minfree = minfree; if (metaspace > 0 && metaspace < sblock.fs_fpg / 2) sblock.fs_metaspace = blknum(&sblock, metaspace); else if (metaspace != -1) /* reserve half of minfree for metadata blocks */ sblock.fs_metaspace = blknum(&sblock, (sblock.fs_fpg * minfree) / 200); if (maxbpg == 0) sblock.fs_maxbpg = MAXBLKPG(sblock.fs_bsize); else sblock.fs_maxbpg = maxbpg; sblock.fs_optim = opt; sblock.fs_cgrotor = 0; sblock.fs_pendingblocks = 0; sblock.fs_pendinginodes = 0; sblock.fs_fmod = 0; sblock.fs_ronly = 0; sblock.fs_state = 0; sblock.fs_clean = 1; sblock.fs_id[0] = (long)utime; sblock.fs_id[1] = newfs_random(); sblock.fs_fsmnt[0] = '\0'; csfrags = howmany(sblock.fs_cssize, sblock.fs_fsize); sblock.fs_dsize = sblock.fs_size - sblock.fs_sblkno - sblock.fs_ncg * (sblock.fs_dblkno - sblock.fs_sblkno); sblock.fs_cstotal.cs_nbfree = fragstoblks(&sblock, sblock.fs_dsize) - howmany(csfrags, sblock.fs_frag); sblock.fs_cstotal.cs_nffree = fragnum(&sblock, sblock.fs_size) + (fragnum(&sblock, csfrags) > 0 ? sblock.fs_frag - fragnum(&sblock, csfrags) : 0); sblock.fs_cstotal.cs_nifree = sblock.fs_ncg * sblock.fs_ipg - ROOTINO; sblock.fs_cstotal.cs_ndir = 0; sblock.fs_dsize -= csfrags; sblock.fs_time = utime; if (Oflag == 1) { sblock.fs_old_time = utime; sblock.fs_old_dsize = sblock.fs_dsize; sblock.fs_old_csaddr = sblock.fs_csaddr; sblock.fs_old_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir; sblock.fs_old_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree; sblock.fs_old_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree; sblock.fs_old_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree; } /* * Dump out summary information about file system. 
*/ # define B2MBFACTOR (1 / (1024.0 * 1024.0)) printf("%s: %.1fMB (%jd sectors) block size %d, fragment size %d\n", fsys, (float)sblock.fs_size * sblock.fs_fsize * B2MBFACTOR, (intmax_t)fsbtodb(&sblock, sblock.fs_size), sblock.fs_bsize, sblock.fs_fsize); printf("\tusing %d cylinder groups of %.2fMB, %d blks, %d inodes.\n", sblock.fs_ncg, (float)sblock.fs_fpg * sblock.fs_fsize * B2MBFACTOR, sblock.fs_fpg / sblock.fs_frag, sblock.fs_ipg); if (sblock.fs_flags & FS_DOSOFTDEP) printf("\twith soft updates\n"); # undef B2MBFACTOR if (Eflag && !Nflag) { printf("Erasing sectors [%jd...%jd]\n", sblock.fs_sblockloc / disk.d_bsize, fsbtodb(&sblock, sblock.fs_size) - 1); berase(&disk, sblock.fs_sblockloc / disk.d_bsize, sblock.fs_size * sblock.fs_fsize - sblock.fs_sblockloc); } /* * Wipe out old UFS1 superblock(s) if necessary. */ if (!Nflag && Oflag != 1) { i = bread(&disk, part_ofs + SBLOCK_UFS1 / disk.d_bsize, chdummy, SBLOCKSIZE); if (i == -1) err(1, "can't read old UFS1 superblock: %s", disk.d_error); if (fsdummy.fs_magic == FS_UFS1_MAGIC) { fsdummy.fs_magic = 0; bwrite(&disk, part_ofs + SBLOCK_UFS1 / disk.d_bsize, chdummy, SBLOCKSIZE); for (cg = 0; cg < fsdummy.fs_ncg; cg++) { if (fsbtodb(&fsdummy, cgsblock(&fsdummy, cg)) > fssize) break; bwrite(&disk, part_ofs + fsbtodb(&fsdummy, cgsblock(&fsdummy, cg)), chdummy, SBLOCKSIZE); } } } if (!Nflag) do_sbwrite(&disk); if (Xflag == 1) { printf("** Exiting on Xflag 1\n"); exit(0); } if (Xflag == 2) printf("** Leaving BAD MAGIC on Xflag 2\n"); else sblock.fs_magic = (Oflag != 1) ? FS_UFS2_MAGIC : FS_UFS1_MAGIC; /* * Now build the cylinders group blocks and * then print out indices of cylinder groups. */ printf("super-block backups (for fsck -b #) at:\n"); i = 0; width = charsperline(); /* * allocate space for superblock, cylinder group map, and * two sets of inode blocks. */ if (sblock.fs_bsize < SBLOCKSIZE) iobufsize = SBLOCKSIZE + 3 * sblock.fs_bsize; else iobufsize = 4 * sblock.fs_bsize; if ((iobuf = calloc(1, iobufsize)) == 0) { printf("Cannot allocate I/O buffer\n"); exit(38); } /* * Make a copy of the superblock into the buffer that we will be * writing out in each cylinder group. */ bcopy((char *)&sblock, iobuf, SBLOCKSIZE); for (cg = 0; cg < sblock.fs_ncg; cg++) { initcg(cg, utime); j = snprintf(tmpbuf, sizeof(tmpbuf), " %jd%s", (intmax_t)fsbtodb(&sblock, cgsblock(&sblock, cg)), cg < (sblock.fs_ncg-1) ? "," : ""); if (j < 0) tmpbuf[j = 0] = '\0'; if (i + j >= width) { printf("\n"); i = 0; } i += j; printf("%s", tmpbuf); fflush(stdout); } printf("\n"); if (Nflag) exit(0); /* * Now construct the initial file system, * then write out the super-block. */ fsinit(utime); if (Oflag == 1) { sblock.fs_old_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir; sblock.fs_old_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree; sblock.fs_old_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree; sblock.fs_old_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree; } if (Xflag == 3) { printf("** Exiting on Xflag 3\n"); exit(0); } if (!Nflag) { do_sbwrite(&disk); /* * For UFS1 filesystems with a blocksize of 64K, the first * alternate superblock resides at the location used for * the default UFS2 superblock. As there is a valid * superblock at this location, the boot code will use * it as its first choice. Thus we have to ensure that * all of its statistcs on usage are correct. 
*/ if (Oflag == 1 && sblock.fs_bsize == 65536) wtfs(fsbtodb(&sblock, cgsblock(&sblock, 0)), sblock.fs_bsize, (char *)&sblock); } for (i = 0; i < sblock.fs_cssize; i += sblock.fs_bsize) wtfs(fsbtodb(&sblock, sblock.fs_csaddr + numfrags(&sblock, i)), sblock.fs_cssize - i < sblock.fs_bsize ? sblock.fs_cssize - i : sblock.fs_bsize, ((char *)fscs) + i); /* * Update information about this partition in pack * label, so that it may be updated on disk. */ if (pp != NULL) { pp->p_fstype = FS_BSDFFS; pp->p_fsize = sblock.fs_fsize; pp->p_frag = sblock.fs_frag; pp->p_cpg = sblock.fs_fpg; } }
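The cylinder-group sizing pass near the top of this mkfs() is dense, so here is a standalone sketch of just its arithmetic: howmany()/roundup() stand-ins, an example geometry, and the two-pass fs_fpg/fs_ipg refinement driven by the density. The macro definitions, inode size, and iblkno value are assumptions for illustration, and the CGSIZE() fit test is omitted; this is not the newfs code itself.

/*
 * Standalone sketch of the cylinder-group sizing arithmetic.
 * The macros are simplified stand-ins for the <ufs/ffs/fs.h> versions.
 */
#include <stdio.h>
#include <stdint.h>

#define HOWMANY(x, y)   (((x) + (y) - 1) / (y))
#define ROUNDUP(x, y)   (HOWMANY(x, y) * (y))
#define MAX(a, b)       ((a) > (b) ? (a) : (b))

int
main(void)
{
    /* Example geometry: 16K blocks, 2K frags, 256-byte inodes (assumed). */
    int64_t bsize = 16384, fsize = 2048, density = 4 * 2048;
    int64_t frag = bsize / fsize;           /* frags per block */
    int64_t inopb = bsize / 256;            /* inodes per block */
    int64_t inopf = inopb / frag;           /* inodes per frag */
    int64_t iblkno = 4 * frag;              /* first inode block (assumed) */

    /* One inode for every "density" bytes of data. */
    int64_t fragsperinode = MAX(density / fsize, 1);
    int64_t minfpg = fragsperinode * inopb;

    /* First guess: one block of inodes, then refine fpg and ipg twice. */
    int64_t ipg = inopb;
    int64_t fpg = ROUNDUP(iblkno + ipg / inopf, frag);
    if (fpg < minfpg)
        fpg = minfpg;
    ipg = ROUNDUP(HOWMANY(fpg, fragsperinode), inopb);
    fpg = ROUNDUP(iblkno + ipg / inopf, frag);
    if (fpg < minfpg)
        fpg = minfpg;
    ipg = ROUNDUP(HOWMANY(fpg, fragsperinode), inopb);

    printf("fragsperinode %lld, fpg %lld, ipg %lld\n",
        (long long)fragsperinode, (long long)fpg, (long long)ipg);
    return 0;
}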
/* * Determine whether a block can be allocated. * * Check to see if a block of the appropriate size is available, * and if it is, allocate it. */ static daddr_t ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size) { struct cg *cgp; struct buf *bp; daddr_t bno, blkno; int error, frags, allocsiz, i; struct fs *fs = ip->i_fs; const int needswap = UFS_FSNEEDSWAP(fs); if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize) return (0); error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize, &bp); if (error) { brelse(bp); return (0); } cgp = (struct cg *)bp->b_data; if (!cg_chkmagic_swap(cgp, needswap) || (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) { brelse(bp); return (0); } if (size == fs->fs_bsize) { bno = ffs_alloccgblk(ip, bp, bpref); bdwrite(bp); return (bno); } /* * check to see if any fragments are already available * allocsiz is the size which will be allocated, hacking * it down to a smaller size if necessary */ frags = numfrags(fs, size); for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++) if (cgp->cg_frsum[allocsiz] != 0) break; if (allocsiz == fs->fs_frag) { /* * no fragments were available, so a block will be * allocated, and hacked up */ if (cgp->cg_cs.cs_nbfree == 0) { brelse(bp); return (0); } bno = ffs_alloccgblk(ip, bp, bpref); bpref = dtogd(fs, bno); for (i = frags; i < fs->fs_frag; i++) setbit(cg_blksfree_swap(cgp, needswap), bpref + i); i = fs->fs_frag - frags; ufs_add32(cgp->cg_cs.cs_nffree, i, needswap); fs->fs_cstotal.cs_nffree += i; fs->fs_cs(fs, cg).cs_nffree += i; fs->fs_fmod = 1; ufs_add32(cgp->cg_frsum[i], 1, needswap); bdwrite(bp); return (bno); } bno = ffs_mapsearch(fs, cgp, bpref, allocsiz); for (i = 0; i < frags; i++) clrbit(cg_blksfree_swap(cgp, needswap), bno + i); ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap); fs->fs_cstotal.cs_nffree -= frags; fs->fs_cs(fs, cg).cs_nffree -= frags; fs->fs_fmod = 1; ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap); if (frags != allocsiz) ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap); blkno = cg * fs->fs_fpg + bno; bdwrite(bp); return blkno; }
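ffs_alloccg() drives its fragment search off cg_frsum[], which counts free runs of each length. The toy model below mirrors only that bookkeeping (pick the smallest adequate run, otherwise break a whole block, and credit any leftover run); FRAG and the bare counters are assumptions for illustration, not the on-disk cylinder group layout.

/*
 * Toy model of the cg_frsum[] bookkeeping done when a fragment run
 * is handed out.  frsum[i] counts free runs of exactly i fragments.
 */
#include <stdio.h>

#define FRAG 8

/* Allocate "frags" fragments; returns the run size consumed, 0 if none. */
static int
frag_alloc(int frsum[FRAG], int frags, int *nbfree)
{
    int allocsiz;

    for (allocsiz = frags; allocsiz < FRAG; allocsiz++)
        if (frsum[allocsiz] != 0)
            break;
    if (allocsiz == FRAG) {
        /* No run is long enough: break up a whole free block. */
        if (*nbfree == 0)
            return (0);
        (*nbfree)--;
        frsum[FRAG - frags]++;          /* leftover piece of the block */
        return (FRAG);
    }
    frsum[allocsiz]--;
    if (frags != allocsiz)
        frsum[allocsiz - frags]++;      /* remainder of the run */
    return (allocsiz);
}

int
main(void)
{
    int frsum[FRAG] = { 0 };
    int nbfree = 2;                     /* two whole free blocks */

    frsum[3] = 1;                       /* and one free 3-frag run */
    printf("3-frag request served from run of %d\n",
        frag_alloc(frsum, 3, &nbfree));
    printf("5-frag request served from run of %d\n",
        frag_alloc(frsum, 5, &nbfree));
    printf("leftover runs: frsum[3]=%d, nbfree=%d\n", frsum[3], nbfree);
    return 0;
}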
/* * Truncate the inode oip to at most length size, freeing the * disk blocks. */ int ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred) { struct vnode *ovp = vp; daddr_t lastblock; struct inode *oip; daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR]; daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR]; struct ext2_sb_info *fs; struct buf *bp; int offset, size, level; long count, nblocks, blocksreleased = 0; int i; int aflags, error, allerror; off_t osize; /* kprintf("ext2_truncate called %d to %d\n", VTOI(ovp)->i_number, length); */ /* * negative file sizes will totally break the code below and * are not meaningful anyways. */ if (length < 0) return EFBIG; oip = VTOI(ovp); if (ovp->v_type == VLNK && oip->i_size < ovp->v_mount->mnt_maxsymlinklen) { #if DIAGNOSTIC if (length != 0) panic("ext2_truncate: partial truncate of symlink"); #endif bzero((char *)&oip->i_shortlink, (u_int)oip->i_size); oip->i_size = 0; oip->i_flag |= IN_CHANGE | IN_UPDATE; return (EXT2_UPDATE(ovp, 1)); } if (oip->i_size == length) { oip->i_flag |= IN_CHANGE | IN_UPDATE; return (EXT2_UPDATE(ovp, 0)); } #if QUOTA if ((error = ext2_getinoquota(oip)) != 0) return (error); #endif fs = oip->i_e2fs; osize = oip->i_size; ext2_discard_prealloc(oip); /* * Lengthen the size of the file. We must ensure that the * last byte of the file is allocated. Since the smallest * value of osize is 0, length will be at least 1. */ if (osize < length) { offset = blkoff(fs, length - 1); lbn = lblkno(fs, length - 1); aflags = B_CLRBUF; if (flags & IO_SYNC) aflags |= B_SYNC; vnode_pager_setsize(ovp, length); error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, aflags); if (error) { vnode_pager_setsize(ovp, osize); return (error); } oip->i_size = length; if (aflags & IO_SYNC) bwrite(bp); else bawrite(bp); oip->i_flag |= IN_CHANGE | IN_UPDATE; return (EXT2_UPDATE(ovp, 1)); } /* * Shorten the size of the file. If the file is not being * truncated to a block boundry, the contents of the * partial block following the end of the file must be * zero'ed in case it ever become accessable again because * of subsequent file growth. */ /* I don't understand the comment above */ offset = blkoff(fs, length); if (offset == 0) { oip->i_size = length; } else { lbn = lblkno(fs, length); aflags = B_CLRBUF; if (flags & IO_SYNC) aflags |= B_SYNC; error = ext2_balloc(oip, lbn, offset, cred, &bp, aflags); if (error) return (error); oip->i_size = length; size = blksize(fs, oip, lbn); bzero((char *)bp->b_data + offset, (u_int)(size - offset)); allocbuf(bp, size); if (aflags & IO_SYNC) bwrite(bp); else bawrite(bp); } /* * Calculate index into inode's block list of * last direct and indirect blocks (if any) * which we want to keep. Lastblock is -1 when * the file is truncated to 0. */ lastblock = lblkno(fs, length + fs->s_blocksize - 1) - 1; lastiblock[SINGLE] = lastblock - NDADDR; lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs); lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs); nblocks = btodb(fs->s_blocksize); /* * Update file and block pointers on disk before we start freeing * blocks. If we crash before free'ing blocks below, the blocks * will be returned to the free list. lastiblock values are also * normalized to -1 for calls to ext2_indirtrunc below. 
*/ bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks); for (level = TRIPLE; level >= SINGLE; level--) if (lastiblock[level] < 0) { oip->i_ib[level] = 0; lastiblock[level] = -1; } for (i = NDADDR - 1; i > lastblock; i--) oip->i_db[i] = 0; oip->i_flag |= IN_CHANGE | IN_UPDATE; allerror = EXT2_UPDATE(ovp, 1); /* * Having written the new inode to disk, save its new configuration * and put back the old block pointers long enough to process them. * Note that we save the new block configuration so we can check it * when we are done. */ bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks); bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks); oip->i_size = osize; error = vtruncbuf(ovp, length, (int)fs->s_blocksize); if (error && (allerror == 0)) allerror = error; /* * Indirect blocks first. */ indir_lbn[SINGLE] = -NDADDR; indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1; indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1; for (level = TRIPLE; level >= SINGLE; level--) { bn = oip->i_ib[level]; if (bn != 0) { error = ext2_indirtrunc(oip, indir_lbn[level], fsbtodoff(fs, bn), lastiblock[level], level, &count); if (error) allerror = error; blocksreleased += count; if (lastiblock[level] < 0) { oip->i_ib[level] = 0; ext2_blkfree(oip, bn, fs->s_frag_size); blocksreleased += nblocks; } } if (lastiblock[level] >= 0) goto done; } /* * All whole direct blocks or frags. */ for (i = NDADDR - 1; i > lastblock; i--) { long bsize; bn = oip->i_db[i]; if (bn == 0) continue; oip->i_db[i] = 0; bsize = blksize(fs, oip, i); ext2_blkfree(oip, bn, bsize); blocksreleased += btodb(bsize); } if (lastblock < 0) goto done; /* * Finally, look for a change in size of the * last direct block; release any frags. */ bn = oip->i_db[lastblock]; if (bn != 0) { long oldspace, newspace; /* * Calculate amount of space we're giving * back as old block size minus new block size. */ oldspace = blksize(fs, oip, lastblock); oip->i_size = length; newspace = blksize(fs, oip, lastblock); if (newspace == 0) panic("itrunc: newspace"); if (oldspace - newspace > 0) { /* * Block number of space to be free'd is * the old block # plus the number of frags * required for the storage we're keeping. */ bn += numfrags(fs, newspace); ext2_blkfree(oip, bn, oldspace - newspace); blocksreleased += btodb(oldspace - newspace); } } done: #if DIAGNOSTIC for (level = SINGLE; level <= TRIPLE; level++) if (newblks[NDADDR + level] != oip->i_ib[level]) panic("itrunc1"); for (i = 0; i < NDADDR; i++) if (newblks[i] != oip->i_db[i]) panic("itrunc2"); if (length == 0 && (!RB_EMPTY(&ovp->v_rbdirty_tree) || !RB_EMPTY(&ovp->v_rbclean_tree))) panic("itrunc3"); #endif /* DIAGNOSTIC */ /* * Put back the real size. */ oip->i_size = length; oip->i_blocks -= blocksreleased; if (oip->i_blocks < 0) /* sanity */ oip->i_blocks = 0; oip->i_flag |= IN_CHANGE; vnode_pager_setsize(ovp, length); #if QUOTA ext2_chkdq(oip, -blocksreleased, NOCRED, 0); #endif return (allerror); }
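The truncate routines above all decide which direct and indirect pointers survive from the same index arithmetic. A minimal sketch of that calculation follows, with an example block geometry assumed and no filesystem structures involved.

/*
 * Sketch of the lastblock/lastiblock[] arithmetic used by the truncate
 * routines.  The geometry (12 direct pointers, 8K blocks, 4-byte block
 * pointers) is an assumed example.
 */
#include <stdio.h>
#include <stdint.h>

#define BSIZE_   8192
#define NDADDR_  12
#define NINDIR_  (BSIZE_ / 4)

enum { SINGLE, DOUBLE, TRIPLE };

int
main(void)
{
    int64_t length = 1 * 1024 * 1024;   /* truncate to 1 MiB */
    int64_t lastblock, lastiblock[3];
    int level;

    /* Last logical block kept; -1 when truncating to zero. */
    lastblock = (length + BSIZE_ - 1) / BSIZE_ - 1;
    lastiblock[SINGLE] = lastblock - NDADDR_;
    lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR_;
    lastiblock[TRIPLE] = lastiblock[DOUBLE] - (int64_t)NINDIR_ * NINDIR_;

    printf("lastblock %lld\n", (long long)lastblock);
    for (level = SINGLE; level <= TRIPLE; level++)
        printf("lastiblock[%d] %lld%s\n", level,
            (long long)lastiblock[level],
            lastiblock[level] < 0 ? "  (level freed entirely)" : "");
    return 0;
}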
/* * Here we actually start growing the file system. We basically read the * cylinder summary from the first cylinder group as we want to update * this on the fly during our various operations. First we handle the * changes in the former last cylinder group. Afterwards we create all new * cylinder groups. Now we handle the cylinder group containing the * cylinder summary which might result in a relocation of the whole * structure. In the end we write back the updated cylinder summary, the * new superblock, and slightly patched versions of the super block * copies. */ static void growfs(int fsi, int fso, unsigned int Nflag) { DBG_FUNC("growfs") time_t modtime; uint cylno; int i, j, width; char tmpbuf[100]; DBG_ENTER; time(&modtime); /* * Get the cylinder summary into the memory. */ fscs = (struct csum *)calloc((size_t)1, (size_t)sblock.fs_cssize); if (fscs == NULL) errx(1, "calloc failed"); for (i = 0; i < osblock.fs_cssize; i += osblock.fs_bsize) { rdfs(fsbtodb(&osblock, osblock.fs_csaddr + numfrags(&osblock, i)), (size_t)MIN(osblock.fs_cssize - i, osblock.fs_bsize), (void *)(((char *)fscs) + i), fsi); } #ifdef FS_DEBUG { struct csum *dbg_csp; u_int32_t dbg_csc; char dbg_line[80]; dbg_csp = fscs; for (dbg_csc = 0; dbg_csc < osblock.fs_ncg; dbg_csc++) { snprintf(dbg_line, sizeof(dbg_line), "%d. old csum in old location", dbg_csc); DBG_DUMP_CSUM(&osblock, dbg_line, dbg_csp++); } } #endif /* FS_DEBUG */ DBG_PRINT0("fscs read\n"); /* * Do all needed changes in the former last cylinder group. */ updjcg(osblock.fs_ncg - 1, modtime, fsi, fso, Nflag); /* * Dump out summary information about file system. */ #ifdef FS_DEBUG #define B2MBFACTOR (1 / (1024.0 * 1024.0)) printf("growfs: %.1fMB (%jd sectors) block size %d, fragment size %d\n", (float)sblock.fs_size * sblock.fs_fsize * B2MBFACTOR, (intmax_t)fsbtodb(&sblock, sblock.fs_size), sblock.fs_bsize, sblock.fs_fsize); printf("\tusing %d cylinder groups of %.2fMB, %d blks, %d inodes.\n", sblock.fs_ncg, (float)sblock.fs_fpg * sblock.fs_fsize * B2MBFACTOR, sblock.fs_fpg / sblock.fs_frag, sblock.fs_ipg); if (sblock.fs_flags & FS_DOSOFTDEP) printf("\twith soft updates\n"); #undef B2MBFACTOR #endif /* FS_DEBUG */ /* * Now build the cylinders group blocks and * then print out indices of cylinder groups. */ printf("super-block backups (for fsck_ffs -b #) at:\n"); i = 0; width = charsperline(); /* * Iterate for only the new cylinder groups. */ for (cylno = osblock.fs_ncg; cylno < sblock.fs_ncg; cylno++) { initcg(cylno, modtime, fso, Nflag); j = sprintf(tmpbuf, " %jd%s", (intmax_t)fsbtodb(&sblock, cgsblock(&sblock, cylno)), cylno < (sblock.fs_ncg - 1) ? "," : "" ); if (i + j >= width) { printf("\n"); i = 0; } i += j; printf("%s", tmpbuf); fflush(stdout); } printf("\n"); /* * Do all needed changes in the first cylinder group. * allocate blocks in new location */ updcsloc(modtime, fsi, fso, Nflag); /* * Now write the cylinder summary back to disk. */ for (i = 0; i < sblock.fs_cssize; i += sblock.fs_bsize) { wtfs(fsbtodb(&sblock, sblock.fs_csaddr + numfrags(&sblock, i)), (size_t)MIN(sblock.fs_cssize - i, sblock.fs_bsize), (void *)(((char *)fscs) + i), fso, Nflag); } DBG_PRINT0("fscs written\n"); #ifdef FS_DEBUG { struct csum *dbg_csp; u_int32_t dbg_csc; char dbg_line[80]; dbg_csp = fscs; for (dbg_csc = 0; dbg_csc < sblock.fs_ncg; dbg_csc++) { snprintf(dbg_line, sizeof(dbg_line), "%d. new csum in new location", dbg_csc); DBG_DUMP_CSUM(&sblock, dbg_line, dbg_csp++); } } #endif /* FS_DEBUG */ /* * Now write the new superblock back to disk. 
*/ sblock.fs_time = modtime; wtfs(sblockloc, (size_t)SBLOCKSIZE, (void *)&sblock, fso, Nflag); DBG_PRINT0("sblock written\n"); DBG_DUMP_FS(&sblock, "new initial sblock"); /* * Clean up the dynamic fields in our superblock copies. */ sblock.fs_fmod = 0; sblock.fs_clean = 1; sblock.fs_ronly = 0; sblock.fs_cgrotor = 0; sblock.fs_state = 0; memset((void *)&sblock.fs_fsmnt, 0, sizeof(sblock.fs_fsmnt)); sblock.fs_flags &= FS_DOSOFTDEP; /* * XXX * The following fields are currently distributed from the superblock * to the copies: * fs_minfree * fs_rotdelay * fs_maxcontig * fs_maxbpg * fs_optim * fs_flags regarding SOFTUPDATES * * We probably should rather change the summary for the cylinder group * statistics here to the value of what would be in there, if the file * system were created initially with the new size. Therefore we still * need to find an easy way of calculating that. * Possibly we can try to read the first superblock copy and apply the * "diffed" stats between the old and new superblock by still copying * certain parameters onto that. */ /* * Write out the duplicate super blocks. */ for (cylno = 0; cylno < sblock.fs_ncg; cylno++) { wtfs(fsbtodb(&sblock, cgsblock(&sblock, cylno)), (size_t)SBLOCKSIZE, (void *)&sblock, fso, Nflag); } DBG_PRINT0("sblock copies written\n"); DBG_DUMP_FS(&sblock, "new other sblocks"); DBG_LEAVE; return; }
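growfs() moves the cylinder summary one block at a time, using numfrags() to turn the byte offset into a fragment offset from fs_csaddr. A minimal sketch of that chunking follows; read_frags() is a hypothetical stand-in for rdfs()/wtfs(), and the sizes are example assumptions.

/*
 * Sketch of the chunked cylinder-summary I/O loop: fs_cssize bytes,
 * starting at fragment fs_csaddr, moved one block at a time.
 */
#include <stdio.h>
#include <string.h>

#define MIN_(a, b)  ((a) < (b) ? (a) : (b))

static void
read_frags(long fragno, size_t len, void *buf)
{
    /* Stand-in: a real tool would transfer "len" bytes at fragment "fragno". */
    memset(buf, 0, len);
    printf("I/O at frag %ld, %zu bytes\n", fragno, len);
}

int
main(void)
{
    long csaddr = 1832;                 /* example fs_csaddr (fragments) */
    long cssize = 10240;                /* example fs_cssize (bytes) */
    long bsize = 4096, fsize = 512;
    static char fscs[10240];
    long i;

    for (i = 0; i < cssize; i += bsize)
        read_frags(csaddr + i / fsize,  /* numfrags(&fs, i) */
            (size_t)MIN_(cssize - i, bsize), fscs + i);
    return 0;
}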
int ckinode(struct ufs1_dinode *dp, struct inodesc *idesc) { ufs_daddr_t *ap; int ret; long n, ndb, offset; struct ufs1_dinode dino; quad_t remsize, sizepb; mode_t mode; char pathbuf[MAXPATHLEN + 1]; if (idesc->id_fix != IGNORE) idesc->id_fix = DONTKNOW; idesc->id_entryno = 0; idesc->id_filesize = dp->di_size; mode = dp->di_mode & IFMT; if (mode == IFBLK || mode == IFCHR || (mode == IFLNK && dp->di_size < (unsigned)sblock.fs_maxsymlinklen)) return (KEEPON); dino = *dp; ndb = howmany(dino.di_size, sblock.fs_bsize); for (ap = &dino.di_db[0]; ap < &dino.di_db[NDADDR]; ap++) { if (--ndb == 0 && (offset = blkoff(&sblock, dino.di_size)) != 0) idesc->id_numfrags = numfrags(&sblock, fragroundup(&sblock, offset)); else idesc->id_numfrags = sblock.fs_frag; if (*ap == 0) { if (idesc->id_type == DATA && ndb >= 0) { /* An empty block in a directory XXX */ getpathname(pathbuf, idesc->id_number, idesc->id_number); pfatal("DIRECTORY %s: CONTAINS EMPTY BLOCKS", pathbuf); if (reply("ADJUST LENGTH") == 1) { dp = ginode(idesc->id_number); dp->di_size = (ap - &dino.di_db[0]) * sblock.fs_bsize; printf( "YOU MUST RERUN FSCK AFTERWARDS\n"); rerun = 1; inodirty(); } } continue; } idesc->id_blkno = *ap; if (idesc->id_type == ADDR) ret = (*idesc->id_func)(idesc); else ret = dirscan(idesc); if (ret & STOP) return (ret); } idesc->id_numfrags = sblock.fs_frag; remsize = dino.di_size - sblock.fs_bsize * NDADDR; sizepb = sblock.fs_bsize; for (ap = &dino.di_ib[0], n = 1; n <= NIADDR; ap++, n++) { if (*ap) { idesc->id_blkno = *ap; ret = iblock(idesc, n, remsize); if (ret & STOP) return (ret); } else { if (idesc->id_type == DATA && remsize > 0) { /* An empty block in a directory XXX */ getpathname(pathbuf, idesc->id_number, idesc->id_number); pfatal("DIRECTORY %s: CONTAINS EMPTY BLOCKS", pathbuf); if (reply("ADJUST LENGTH") == 1) { dp = ginode(idesc->id_number); dp->di_size -= remsize; remsize = 0; printf( "YOU MUST RERUN FSCK AFTERWARDS\n"); rerun = 1; inodirty(); break; } } } sizepb *= NINDIR(&sblock); remsize -= sizepb; } return (KEEPON); }
int ckinode(union dinode *dp, struct inodesc *idesc) { off_t remsize, sizepb; int i, offset, ret; union dinode dino; ufs2_daddr_t ndb; mode_t mode; char pathbuf[MAXPATHLEN + 1]; if (idesc->id_fix != IGNORE) idesc->id_fix = DONTKNOW; idesc->id_lbn = -1; idesc->id_entryno = 0; idesc->id_filesize = DIP(dp, di_size); mode = DIP(dp, di_mode) & IFMT; if (mode == IFBLK || mode == IFCHR || (mode == IFLNK && DIP(dp, di_size) < (unsigned)sblock.fs_maxsymlinklen)) return (KEEPON); if (sblock.fs_magic == FS_UFS1_MAGIC) dino.dp1 = dp->dp1; else dino.dp2 = dp->dp2; ndb = howmany(DIP(&dino, di_size), sblock.fs_bsize); for (i = 0; i < NDADDR; i++) { idesc->id_lbn++; if (--ndb == 0 && (offset = blkoff(&sblock, DIP(&dino, di_size))) != 0) idesc->id_numfrags = numfrags(&sblock, fragroundup(&sblock, offset)); else idesc->id_numfrags = sblock.fs_frag; if (DIP(&dino, di_db[i]) == 0) { if (idesc->id_type == DATA && ndb >= 0) { /* An empty block in a directory XXX */ getpathname(pathbuf, idesc->id_number, idesc->id_number); pfatal("DIRECTORY %s: CONTAINS EMPTY BLOCKS", pathbuf); if (reply("ADJUST LENGTH") == 1) { dp = ginode(idesc->id_number); DIP_SET(dp, di_size, i * sblock.fs_bsize); printf( "YOU MUST RERUN FSCK AFTERWARDS\n"); rerun = 1; inodirty(); } } continue; } idesc->id_blkno = DIP(&dino, di_db[i]); if (idesc->id_type != DATA) ret = (*idesc->id_func)(idesc); else ret = dirscan(idesc); if (ret & STOP) return (ret); } idesc->id_numfrags = sblock.fs_frag; remsize = DIP(&dino, di_size) - sblock.fs_bsize * NDADDR; sizepb = sblock.fs_bsize; for (i = 0; i < NIADDR; i++) { sizepb *= NINDIR(&sblock); if (DIP(&dino, di_ib[i])) { idesc->id_blkno = DIP(&dino, di_ib[i]); ret = iblock(idesc, i + 1, remsize, BT_LEVEL1 + i); if (ret & STOP) return (ret); } else { idesc->id_lbn += sizepb / sblock.fs_bsize; if (idesc->id_type == DATA && remsize > 0) { /* An empty block in a directory XXX */ getpathname(pathbuf, idesc->id_number, idesc->id_number); pfatal("DIRECTORY %s: CONTAINS EMPTY BLOCKS", pathbuf); if (reply("ADJUST LENGTH") == 1) { dp = ginode(idesc->id_number); DIP_SET(dp, di_size, DIP(dp, di_size) - remsize); remsize = 0; printf( "YOU MUST RERUN FSCK AFTERWARDS\n"); rerun = 1; inodirty(); break; } } } remsize -= sizepb; } return (KEEPON); }
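In both ckinode() variants the last data block of a file is credited with only as many fragments as the file's tail occupies, numfrags(fragroundup(blkoff(size))), while every other block counts fs_frag. A standalone sketch of that sizing, with simplified macro stand-ins and an assumed 8K/1K geometry:

/*
 * Sketch of how ckinode() sizes each direct block: full blocks carry
 * fs_frag fragments, the final partial block only its rounded tail.
 */
#include <stdio.h>
#include <stdint.h>

#define BSIZE_  8192
#define FSIZE_  1024
#define FRAG_   (BSIZE_ / FSIZE_)

#define BLKOFF(sz)      ((sz) % BSIZE_)                          /* blkoff() */
#define FRAGROUNDUP(x)  (((x) + FSIZE_ - 1) / FSIZE_ * FSIZE_)   /* fragroundup() */
#define NUMFRAGS(x)     ((x) / FSIZE_)                           /* numfrags() */

static int
frags_in_block(int64_t size, int64_t lbn)
{
    int64_t ndb = (size + BSIZE_ - 1) / BSIZE_;   /* howmany(size, bsize) */
    int64_t offset = BLKOFF(size);

    if (lbn == ndb - 1 && offset != 0)
        return (int)NUMFRAGS(FRAGROUNDUP(offset));
    return FRAG_;
}

int
main(void)
{
    int64_t size = 3 * BSIZE_ + 2500;   /* three full blocks plus a partial */

    printf("block 0 holds %d frags\n", frags_in_block(size, 0));
    printf("block 3 holds %d frags\n", frags_in_block(size, 3));
    return 0;
}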
/* * Free a block or fragment. * * The specified block or fragment is placed back in the * free map. If a fragment is deallocated, a possible * block reassembly is checked. */ void ffs_blkfree(struct inode *ip, daddr_t bno, long size) { struct cg *cgp; struct buf *bp; int32_t fragno, cgbno; int i, error, cg, blk, frags, bbase; struct fs *fs = ip->i_fs; const int needswap = UFS_FSNEEDSWAP(fs); if (size > fs->fs_bsize || fragoff(fs, size) != 0 || fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) { errx(1, "blkfree: bad size: bno %lld bsize %d size %ld", (long long)bno, fs->fs_bsize, size); } cg = dtog(fs, bno); if (bno >= fs->fs_size) { warnx("bad block %lld, ino %ju", (long long)bno, (uintmax_t)ip->i_number); return; } error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize, &bp); if (error) { brelse(bp); return; } cgp = (struct cg *)bp->b_data; if (!cg_chkmagic_swap(cgp, needswap)) { brelse(bp); return; } cgbno = dtogd(fs, bno); if (size == fs->fs_bsize) { fragno = fragstoblks(fs, cgbno); if (!ffs_isfreeblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) { errx(1, "blkfree: freeing free block %lld", (long long)bno); } ffs_setblock(fs, cg_blksfree_swap(cgp, needswap), fragno); ffs_clusteracct(fs, cgp, fragno, 1); ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap); fs->fs_cstotal.cs_nbfree++; fs->fs_cs(fs, cg).cs_nbfree++; } else { bbase = cgbno - fragnum(fs, cgbno); /* * decrement the counts associated with the old frags */ blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase); ffs_fragacct_swap(fs, blk, cgp->cg_frsum, -1, needswap); /* * deallocate the fragment */ frags = numfrags(fs, size); for (i = 0; i < frags; i++) { if (isset(cg_blksfree_swap(cgp, needswap), cgbno + i)) { errx(1, "blkfree: freeing free frag: block %lld", (long long)(cgbno + i)); } setbit(cg_blksfree_swap(cgp, needswap), cgbno + i); } ufs_add32(cgp->cg_cs.cs_nffree, i, needswap); fs->fs_cstotal.cs_nffree += i; fs->fs_cs(fs, cg).cs_nffree += i; /* * add back in counts associated with the new frags */ blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase); ffs_fragacct_swap(fs, blk, cgp->cg_frsum, 1, needswap); /* * if a complete block has been reassembled, account for it */ fragno = fragstoblks(fs, bbase); if (ffs_isblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) { ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap); fs->fs_cstotal.cs_nffree -= fs->fs_frag; fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag; ffs_clusteracct(fs, cgp, fragno, 1); ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap); fs->fs_cstotal.cs_nbfree++; fs->fs_cs(fs, cg).cs_nbfree++; } } fs->fs_fmod = 1; bdwrite(bp); }
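The fragment-freeing path in ffs_blkfree() sets the freed fragments' bits and then checks whether the whole block has come back together. The toy below models that with a single byte as the per-block free map; the bitmap representation and bare counters are assumptions for illustration only, not the cylinder group format.

/*
 * Toy model of fragment freeing and whole-block reassembly accounting.
 */
#include <stdio.h>

#define FRAG 8

static int
blk_is_free(unsigned char map)          /* cf. ffs_isblock() */
{
    return map == (unsigned char)((1 << FRAG) - 1);
}

int
main(void)
{
    unsigned char map = 0x0f;           /* frags 0-3 already free */
    int cgbno = 4, frags = 4, i;
    int nffree = 4, nbfree = 0;

    /* "deallocate the fragment": set each freed frag's bit. */
    for (i = 0; i < frags; i++)
        map |= 1 << (cgbno + i);
    nffree += frags;

    /* "if a complete block has been reassembled, account for it" */
    if (blk_is_free(map)) {
        nffree -= FRAG;
        nbfree++;
    }
    printf("map 0x%02x, nffree %d, nbfree %d\n", map, nffree, nbfree);
    return 0;
}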
/* * Truncate the inode oip to at most length size, freeing the * disk blocks. */ int ffs_truncate(struct inode *oip, off_t length, int flags, struct ucred *cred) { struct vnode *ovp; daddr64_t lastblock; daddr64_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR]; daddr64_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR]; struct fs *fs; struct buf *bp; int offset, size, level; long count, nblocks, vflags, blocksreleased = 0; int i, aflags, error, allerror, indirect = 0; off_t osize; extern int num_indirdep; extern int max_indirdep; if (length < 0) return (EINVAL); ovp = ITOV(oip); if (ovp->v_type != VREG && ovp->v_type != VDIR && ovp->v_type != VLNK) return (0); if (DIP(oip, size) == length) return (0); if (ovp->v_type == VLNK && (DIP(oip, size) < ovp->v_mount->mnt_maxsymlinklen || (ovp->v_mount->mnt_maxsymlinklen == 0 && oip->i_din1->di_blocks == 0))) { #ifdef DIAGNOSTIC if (length != 0) panic("ffs_truncate: partial truncate of symlink"); #endif memset(SHORTLINK(oip), 0, (size_t) DIP(oip, size)); DIP_ASSIGN(oip, size, 0); oip->i_flag |= IN_CHANGE | IN_UPDATE; return (UFS_UPDATE(oip, MNT_WAIT)); } if ((error = getinoquota(oip)) != 0) return (error); uvm_vnp_setsize(ovp, length); oip->i_ci.ci_lasta = oip->i_ci.ci_clen = oip->i_ci.ci_cstart = oip->i_ci.ci_lastw = 0; if (DOINGSOFTDEP(ovp)) { if (length > 0 || softdep_slowdown(ovp)) { /* * If a file is only partially truncated, then * we have to clean up the data structures * describing the allocation past the truncation * point. Finding and deallocating those structures * is a lot of work. Since partial truncation occurs * rarely, we solve the problem by syncing the file * so that it will have no data structures left. */ if ((error = VOP_FSYNC(ovp, cred, MNT_WAIT)) != 0) return (error); } else { (void)ufs_quota_free_blocks(oip, DIP(oip, blocks), NOCRED); softdep_setup_freeblocks(oip, length); (void) vinvalbuf(ovp, 0, cred, curproc, 0, 0); oip->i_flag |= IN_CHANGE | IN_UPDATE; return (UFS_UPDATE(oip, 0)); } } fs = oip->i_fs; osize = DIP(oip, size); /* * Lengthen the size of the file. We must ensure that the * last byte of the file is allocated. Since the smallest * value of osize is 0, length will be at least 1. */ if (osize < length) { if (length > fs->fs_maxfilesize) return (EFBIG); aflags = B_CLRBUF; if (flags & IO_SYNC) aflags |= B_SYNC; error = UFS_BUF_ALLOC(oip, length - 1, 1, cred, aflags, &bp); if (error) return (error); if (bp->b_lblkno >= NDADDR) indirect = 1; DIP_ASSIGN(oip, size, length); uvm_vnp_setsize(ovp, length); (void) uvm_vnp_uncache(ovp); if (aflags & B_SYNC) bwrite(bp); else bawrite(bp); oip->i_flag |= IN_CHANGE | IN_UPDATE; error = UFS_UPDATE(oip, MNT_WAIT); if (DOINGSOFTDEP(ovp) && num_indirdep > max_indirdep) if (indirect) { /* * If the number of pending indirect block * dependencies is sufficiently close to the * maximum number of simultaneously mappable * buffers force a sync on the vnode to prevent * buffer cache exhaustion. */ VOP_FSYNC(ovp, curproc->p_ucred, MNT_WAIT); } return (error); } uvm_vnp_setsize(ovp, length); /* * Shorten the size of the file. If the file is not being * truncated to a block boundary, the contents of the * partial block following the end of the file must be * zero'ed in case it ever becomes accessible again because * of subsequent file growth. Directories however are not * zero'ed as they should grow back initialized to empty. 
*/ offset = blkoff(fs, length); if (offset == 0) { DIP_ASSIGN(oip, size, length); } else { lbn = lblkno(fs, length); aflags = B_CLRBUF; if (flags & IO_SYNC) aflags |= B_SYNC; error = UFS_BUF_ALLOC(oip, length - 1, 1, cred, aflags, &bp); if (error) return (error); /* * When we are doing soft updates and the UFS_BALLOC * above fills in a direct block hole with a full sized * block that will be truncated down to a fragment below, * we must flush out the block dependency with an FSYNC * so that we do not get a soft updates inconsistency * when we create the fragment below. */ if (DOINGSOFTDEP(ovp) && lbn < NDADDR && fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize && (error = VOP_FSYNC(ovp, cred, MNT_WAIT)) != 0) return (error); DIP_ASSIGN(oip, size, length); size = blksize(fs, oip, lbn); (void) uvm_vnp_uncache(ovp); if (ovp->v_type != VDIR) bzero((char *)bp->b_data + offset, (u_int)(size - offset)); bp->b_bcount = size; if (aflags & B_SYNC) bwrite(bp); else bawrite(bp); } /* * Calculate index into inode's block list of * last direct and indirect blocks (if any) * which we want to keep. Lastblock is -1 when * the file is truncated to 0. */ lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1; lastiblock[SINGLE] = lastblock - NDADDR; lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs); lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs); nblocks = btodb(fs->fs_bsize); /* * Update file and block pointers on disk before we start freeing * blocks. If we crash before free'ing blocks below, the blocks * will be returned to the free list. lastiblock values are also * normalized to -1 for calls to ffs_indirtrunc below. */ for (level = TRIPLE; level >= SINGLE; level--) { oldblks[NDADDR + level] = DIP(oip, ib[level]); if (lastiblock[level] < 0) { DIP_ASSIGN(oip, ib[level], 0); lastiblock[level] = -1; } } for (i = 0; i < NDADDR; i++) { oldblks[i] = DIP(oip, db[i]); if (i > lastblock) DIP_ASSIGN(oip, db[i], 0); } oip->i_flag |= IN_CHANGE | IN_UPDATE; if ((error = UFS_UPDATE(oip, MNT_WAIT)) != 0) allerror = error; /* * Having written the new inode to disk, save its new configuration * and put back the old block pointers long enough to process them. * Note that we save the new block configuration so we can check it * when we are done. */ for (i = 0; i < NDADDR; i++) { newblks[i] = DIP(oip, db[i]); DIP_ASSIGN(oip, db[i], oldblks[i]); } for (i = 0; i < NIADDR; i++) { newblks[NDADDR + i] = DIP(oip, ib[i]); DIP_ASSIGN(oip, ib[i], oldblks[NDADDR + i]); } DIP_ASSIGN(oip, size, osize); vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA; allerror = vinvalbuf(ovp, vflags, cred, curproc, 0, 0); /* * Indirect blocks first. */ indir_lbn[SINGLE] = -NDADDR; indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1; indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1; for (level = TRIPLE; level >= SINGLE; level--) { bn = DIP(oip, ib[level]); if (bn != 0) { error = ffs_indirtrunc(oip, indir_lbn[level], fsbtodb(fs, bn), lastiblock[level], level, &count); if (error) allerror = error; blocksreleased += count; if (lastiblock[level] < 0) { DIP_ASSIGN(oip, ib[level], 0); ffs_blkfree(oip, bn, fs->fs_bsize); blocksreleased += nblocks; } } if (lastiblock[level] >= 0) goto done; } /* * All whole direct blocks or frags. 
*/ for (i = NDADDR - 1; i > lastblock; i--) { long bsize; bn = DIP(oip, db[i]); if (bn == 0) continue; DIP_ASSIGN(oip, db[i], 0); bsize = blksize(fs, oip, i); ffs_blkfree(oip, bn, bsize); blocksreleased += btodb(bsize); } if (lastblock < 0) goto done; /* * Finally, look for a change in size of the * last direct block; release any frags. */ bn = DIP(oip, db[lastblock]); if (bn != 0) { long oldspace, newspace; /* * Calculate amount of space we're giving * back as old block size minus new block size. */ oldspace = blksize(fs, oip, lastblock); DIP_ASSIGN(oip, size, length); newspace = blksize(fs, oip, lastblock); if (newspace == 0) panic("ffs_truncate: newspace"); if (oldspace - newspace > 0) { /* * Block number of space to be free'd is * the old block # plus the number of frags * required for the storage we're keeping. */ bn += numfrags(fs, newspace); ffs_blkfree(oip, bn, oldspace - newspace); blocksreleased += btodb(oldspace - newspace); } } done: #ifdef DIAGNOSTIC for (level = SINGLE; level <= TRIPLE; level++) if (newblks[NDADDR + level] != DIP(oip, ib[level])) panic("ffs_truncate1"); for (i = 0; i < NDADDR; i++) if (newblks[i] != DIP(oip, db[i])) panic("ffs_truncate2"); #endif /* DIAGNOSTIC */ /* * Put back the real size. */ DIP_ASSIGN(oip, size, length); DIP_ADD(oip, blocks, -blocksreleased); oip->i_flag |= IN_CHANGE; (void)ufs_quota_free_blocks(oip, blocksreleased, NOCRED); return (allerror); }
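When the last direct block shrinks, the truncate routines free only its tail: the difference between the block's old and new rounded sizes, starting numfrags(newspace) fragments past the block's address. A sketch of that arithmetic with a simplified blksize() and an assumed geometry:

/*
 * Sketch of the "release any frags" accounting for a shrinking last
 * direct block.  blksize_() is a simplified stand-in for blksize().
 */
#include <stdio.h>
#include <stdint.h>

#define BSIZE_  8192
#define FSIZE_  1024

/* Space occupied by logical block lbn of a file of the given size. */
static int64_t
blksize_(int64_t size, int64_t lbn)
{
    if (size >= (lbn + 1) * BSIZE_)
        return BSIZE_;
    return (size % BSIZE_ + FSIZE_ - 1) / FSIZE_ * FSIZE_;  /* fragroundup(blkoff()) */
}

int
main(void)
{
    int64_t osize = 5 * BSIZE_;              /* old size: 5 full blocks */
    int64_t length = 4 * BSIZE_ + 1500;      /* new size: partial last block */
    int64_t lastblock = (length + BSIZE_ - 1) / BSIZE_ - 1;
    int64_t bn = 12345;                      /* assumed frag address of that block */

    int64_t oldspace = blksize_(osize, lastblock);
    int64_t newspace = blksize_(length, lastblock);

    if (oldspace - newspace > 0)
        printf("free %lld bytes starting at frag %lld\n",
            (long long)(oldspace - newspace),
            (long long)(bn + newspace / FSIZE_));   /* bn + numfrags(newspace) */
    return 0;
}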
/* * Check validity of held blocks in an inode, recursing through all blocks. */ int ckinode(struct ufs1_dinode *dp, struct inodesc *idesc) { ufs_daddr_t *ap, lbn; long ret, n, ndb, offset; struct ufs1_dinode dino; u_int64_t remsize, sizepb; mode_t mode; char pathbuf[MAXPATHLEN + 1]; struct uvnode *vp, *thisvp; if (idesc->id_fix != IGNORE) idesc->id_fix = DONTKNOW; idesc->id_entryno = 0; idesc->id_filesize = dp->di_size; mode = dp->di_mode & IFMT; if (mode == IFBLK || mode == IFCHR || (mode == IFLNK && (dp->di_size < fs->lfs_maxsymlinklen || (fs->lfs_maxsymlinklen == 0 && dp->di_blocks == 0)))) return (KEEPON); dino = *dp; ndb = howmany(dino.di_size, fs->lfs_bsize); thisvp = vget(fs, idesc->id_number); for (lbn = 0; lbn < NDADDR; lbn++) { ap = dino.di_db + lbn; if (thisvp) idesc->id_numfrags = numfrags(fs, VTOI(thisvp)->i_lfs_fragsize[lbn]); else { if (--ndb == 0 && (offset = blkoff(fs, dino.di_size)) != 0) { idesc->id_numfrags = numfrags(fs, fragroundup(fs, offset)); } else idesc->id_numfrags = fs->lfs_frag; } if (*ap == 0) { if (idesc->id_type == DATA && ndb >= 0) { /* An empty block in a directory XXX */ getpathname(pathbuf, sizeof(pathbuf), idesc->id_number, idesc->id_number); pfatal("DIRECTORY %s INO %lld: CONTAINS EMPTY BLOCKS [1]", pathbuf, (long long)idesc->id_number); if (reply("ADJUST LENGTH") == 1) { vp = vget(fs, idesc->id_number); dp = VTOD(vp); dp->di_size = (ap - &dino.di_db[0]) * fs->lfs_bsize; printf( "YOU MUST RERUN FSCK AFTERWARDS\n"); rerun = 1; inodirty(VTOI(vp)); } else break; } continue; } idesc->id_blkno = *ap; idesc->id_lblkno = ap - &dino.di_db[0]; if (idesc->id_type == ADDR) { ret = (*idesc->id_func) (idesc); } else ret = dirscan(idesc); if (ret & STOP) return (ret); } idesc->id_numfrags = fs->lfs_frag; remsize = dino.di_size - fs->lfs_bsize * NDADDR; sizepb = fs->lfs_bsize; for (ap = &dino.di_ib[0], n = 1; n <= NIADDR; ap++, n++) { if (*ap) { idesc->id_blkno = *ap; ret = iblock(idesc, n, remsize); if (ret & STOP) return (ret); } else { if (idesc->id_type == DATA && remsize > 0) { /* An empty block in a directory XXX */ getpathname(pathbuf, sizeof(pathbuf), idesc->id_number, idesc->id_number); pfatal("DIRECTORY %s INO %lld: CONTAINS EMPTY BLOCKS [2]", pathbuf, (long long)idesc->id_number); if (reply("ADJUST LENGTH") == 1) { vp = vget(fs, idesc->id_number); dp = VTOD(vp); dp->di_size -= remsize; remsize = 0; printf( "YOU MUST RERUN FSCK AFTERWARDS\n"); rerun = 1; inodirty(VTOI(vp)); break; } else break; } } sizepb *= NINDIR(fs); remsize -= sizepb; } return (KEEPON); }
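The indirect-block loop in the ckinode() variants tracks how many bytes each level can map: sizepb grows by a factor of NINDIR per level and remsize shrinks by that span. A small sketch with assumed 8K blocks and 2048 pointers per indirect block:

/*
 * Sketch of the remsize/sizepb bookkeeping over the indirect levels.
 */
#include <stdio.h>
#include <stdint.h>

#define BSIZE_   8192
#define NDADDR_  12
#define NINDIR_  2048
#define NIADDR_  3

int
main(void)
{
    int64_t size = (int64_t)40 * 1024 * 1024 * 1024;   /* 40 GiB file */
    int64_t remsize = size - (int64_t)BSIZE_ * NDADDR_;
    int64_t sizepb = BSIZE_;
    int n;

    for (n = 1; n <= NIADDR_; n++) {
        sizepb *= NINDIR_;              /* bytes covered by this level */
        printf("level %d covers %lld bytes, %s\n", n, (long long)sizepb,
            remsize > 0 ? "needed" : "not needed");
        remsize -= sizepb;
    }
    return 0;
}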
void mkfs(char *fsys, int fi, int fo, const char *mfscopy) { long i, mincpc, mincpg, inospercg; long cylno, rpos, blk, j, emitwarn = 0; long used, mincpgcnt, bpcg; off_t usedb; long mapcramped, inodecramped; long postblsize, rotblsize, totalsbsize; int status, fd; time_t utime; quad_t sizepb; int width; char tmpbuf[100]; /* XXX this will break in about 2,500 years */ time(&utime); #ifdef FSIRAND if (!randinit) { randinit = 1; srandomdev(); } #endif if (mfs) { int omask; pid_t child; mfs_ppid = getpid(); signal(SIGUSR1, parentready); if ((child = fork()) != 0) { /* * Parent */ if (child == -1) err(10, "mfs"); if (mfscopy) copyroot = FSCopy(©hlinks, mfscopy); signal(SIGUSR1, started); kill(child, SIGUSR1); while (waitpid(child, &status, 0) != child) ; exit(WEXITSTATUS(status)); /* NOTREACHED */ } /* * Child */ omask = sigblock(sigmask(SIGUSR1)); while (parentready_signalled == 0) sigpause(omask); sigsetmask(omask); if (filename != NULL) { unsigned char buf[BUFSIZ]; unsigned long l, l1; ssize_t w; fd = open(filename, O_RDWR|O_TRUNC|O_CREAT, 0644); if(fd < 0) err(12, "%s", filename); l1 = fssize * sectorsize; if (l1 > BUFSIZ) l1 = BUFSIZ; for (l = 0; l < fssize * (u_long)sectorsize; l += l1) { w = write(fd, buf, l1); if (w < 0 || (u_long)w != l1) err(12, "%s", filename); } membase = mmap(NULL, fssize * sectorsize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (membase == MAP_FAILED) err(12, "mmap"); close(fd); } else { membase = mmap(NULL, fssize * sectorsize, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANON, -1, 0); if (membase == MAP_FAILED) errx(13, "mmap (anonymous memory) failed"); } } fsi = fi; fso = fo; if (Oflag) { sblock.fs_inodefmt = FS_42INODEFMT; sblock.fs_maxsymlinklen = 0; } else { sblock.fs_inodefmt = FS_44INODEFMT; sblock.fs_maxsymlinklen = MAXSYMLINKLEN; } if (Uflag) sblock.fs_flags |= FS_DOSOFTDEP; if (Lflag) strlcpy(sblock.fs_volname, volumelabel, MAXVOLLEN); /* * Validate the given file system size. * Verify that its last block can actually be accessed. 
*/ if (fssize == 0) printf("preposterous size %lu\n", fssize), exit(13); wtfs(fssize - (realsectorsize / DEV_BSIZE), realsectorsize, (char *)&sblock); /* * collect and verify the sector and track info */ sblock.fs_nsect = nsectors; sblock.fs_ntrak = ntracks; if (sblock.fs_ntrak <= 0) printf("preposterous ntrak %d\n", sblock.fs_ntrak), exit(14); if (sblock.fs_nsect <= 0) printf("preposterous nsect %d\n", sblock.fs_nsect), exit(15); /* * collect and verify the filesystem density info */ sblock.fs_avgfilesize = avgfilesize; sblock.fs_avgfpdir = avgfilesperdir; if (sblock.fs_avgfilesize <= 0) printf("illegal expected average file size %d\n", sblock.fs_avgfilesize), exit(14); if (sblock.fs_avgfpdir <= 0) printf("illegal expected number of files per directory %d\n", sblock.fs_avgfpdir), exit(15); /* * collect and verify the block and fragment sizes */ sblock.fs_bsize = bsize; sblock.fs_fsize = fsize; if (!POWEROF2(sblock.fs_bsize)) { printf("block size must be a power of 2, not %d\n", sblock.fs_bsize); exit(16); } if (!POWEROF2(sblock.fs_fsize)) { printf("fragment size must be a power of 2, not %d\n", sblock.fs_fsize); exit(17); } if (sblock.fs_fsize < sectorsize) { printf("fragment size %d is too small, minimum is %d\n", sblock.fs_fsize, sectorsize); exit(18); } if (sblock.fs_bsize < MINBSIZE) { printf("block size %d is too small, minimum is %d\n", sblock.fs_bsize, MINBSIZE); exit(19); } if (sblock.fs_bsize < sblock.fs_fsize) { printf("block size (%d) cannot be smaller than fragment size (%d)\n", sblock.fs_bsize, sblock.fs_fsize); exit(20); } sblock.fs_bmask = ~(sblock.fs_bsize - 1); sblock.fs_fmask = ~(sblock.fs_fsize - 1); sblock.fs_qbmask = ~sblock.fs_bmask; sblock.fs_qfmask = ~sblock.fs_fmask; for (sblock.fs_bshift = 0, i = sblock.fs_bsize; i > 1; i >>= 1) sblock.fs_bshift++; for (sblock.fs_fshift = 0, i = sblock.fs_fsize; i > 1; i >>= 1) sblock.fs_fshift++; sblock.fs_frag = numfrags(&sblock, sblock.fs_bsize); for (sblock.fs_fragshift = 0, i = sblock.fs_frag; i > 1; i >>= 1) sblock.fs_fragshift++; if (sblock.fs_frag > MAXFRAG) { printf("fragment size %d is too small, minimum with block size %d is %d\n", sblock.fs_fsize, sblock.fs_bsize, sblock.fs_bsize / MAXFRAG); exit(21); } sblock.fs_nrpos = nrpos; sblock.fs_nindir = sblock.fs_bsize / sizeof(daddr_t); sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs1_dinode); sblock.fs_nspf = sblock.fs_fsize / sectorsize; for (sblock.fs_fsbtodb = 0, i = NSPF(&sblock); i > 1; i >>= 1) sblock.fs_fsbtodb++; sblock.fs_sblkno = roundup(howmany(bbsize + sbsize, sblock.fs_fsize), sblock.fs_frag); sblock.fs_cblkno = (daddr_t)(sblock.fs_sblkno + roundup(howmany(sbsize, sblock.fs_fsize), sblock.fs_frag)); sblock.fs_iblkno = sblock.fs_cblkno + sblock.fs_frag; sblock.fs_cgoffset = roundup( howmany(sblock.fs_nsect, NSPF(&sblock)), sblock.fs_frag); for (sblock.fs_cgmask = 0xffffffff, i = sblock.fs_ntrak; i > 1; i >>= 1) sblock.fs_cgmask <<= 1; if (!POWEROF2(sblock.fs_ntrak)) sblock.fs_cgmask <<= 1; sblock.fs_maxfilesize = sblock.fs_bsize * NDADDR - 1; for (sizepb = sblock.fs_bsize, i = 0; i < NIADDR; i++) { sizepb *= NINDIR(&sblock); sblock.fs_maxfilesize += sizepb; } /* * Validate specified/determined secpercyl * and calculate minimum cylinders per group. 
*/ sblock.fs_spc = secpercyl; for (sblock.fs_cpc = NSPB(&sblock), i = sblock.fs_spc; sblock.fs_cpc > 1 && (i & 1) == 0; sblock.fs_cpc >>= 1, i >>= 1) /* void */; mincpc = sblock.fs_cpc; bpcg = sblock.fs_spc * sectorsize; inospercg = roundup(bpcg / sizeof(struct ufs1_dinode), INOPB(&sblock)); if (inospercg > MAXIPG(&sblock)) inospercg = MAXIPG(&sblock); used = (sblock.fs_iblkno + inospercg / INOPF(&sblock)) * NSPF(&sblock); mincpgcnt = howmany(sblock.fs_cgoffset * (~sblock.fs_cgmask) + used, sblock.fs_spc); mincpg = roundup(mincpgcnt, mincpc); /* * Ensure that cylinder group with mincpg has enough space * for block maps. */ sblock.fs_cpg = mincpg; sblock.fs_ipg = inospercg; if (maxcontig > 1) sblock.fs_contigsumsize = MIN(maxcontig, FS_MAXCONTIG); mapcramped = 0; while (CGSIZE(&sblock) > (uint32_t)sblock.fs_bsize) { mapcramped = 1; if (sblock.fs_bsize < MAXBSIZE) { sblock.fs_bsize <<= 1; if ((i & 1) == 0) { i >>= 1; } else {
/* * Truncate the inode ip to at most length size, freeing the * disk blocks. */ int ffs_truncate(vnode *vp, off_t length, int flags, Ucred *cred) { print("HARVEY TODO: %s\n", __func__); #if 0 struct inode *ip; ufs2_daddr_t bn, lbn, lastblock, lastiblock[UFS_NIADDR]; ufs2_daddr_t indir_lbn[UFS_NIADDR], oldblks[UFS_NDADDR + UFS_NIADDR]; ufs2_daddr_t newblks[UFS_NDADDR + UFS_NIADDR]; ufs2_daddr_t count, blocksreleased = 0, datablocks, blkno; struct bufobj *bo; struct fs *fs; struct buf *bp; struct ufsmount *ump; int softdeptrunc, journaltrunc; int needextclean, extblocks; int offset, size, level, nblocks; int i, error, allerror, indiroff, waitforupdate; off_t osize; ip = VTOI(vp); ump = VFSTOUFS(vp->v_mount); fs = ump->um_fs; bo = &vp->v_bufobj; ASSERT_VOP_LOCKED(vp, "ffs_truncate"); if (length < 0) return (EINVAL); if (length > fs->fs_maxfilesize) return (EFBIG); #ifdef QUOTA error = getinoquota(ip); if (error) return (error); #endif /* * Historically clients did not have to specify which data * they were truncating. So, if not specified, we assume * traditional behavior, e.g., just the normal data. */ if ((flags & (IO_EXT | IO_NORMAL)) == 0) flags |= IO_NORMAL; if (!DOINGSOFTDEP(vp) && !DOINGASYNC(vp)) flags |= IO_SYNC; waitforupdate = (flags & IO_SYNC) != 0 || !DOINGASYNC(vp); /* * If we are truncating the extended-attributes, and cannot * do it with soft updates, then do it slowly here. If we are * truncating both the extended attributes and the file contents * (e.g., the file is being unlinked), then pick it off with * soft updates below. */ allerror = 0; needextclean = 0; softdeptrunc = 0; journaltrunc = DOINGSUJ(vp); if (journaltrunc == 0 && DOINGSOFTDEP(vp) && length == 0) softdeptrunc = !softdep_slowdown(vp); extblocks = 0; datablocks = DIP(ip, i_blocks); if (fs->fs_magic == FS_UFS2_MAGIC && ip->i_din2->di_extsize > 0) { extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); datablocks -= extblocks; } if ((flags & IO_EXT) && extblocks > 0) { if (length != 0) panic("ffs_truncate: partial trunc of extdata"); if (softdeptrunc || journaltrunc) { if ((flags & IO_NORMAL) == 0) goto extclean; needextclean = 1; } else { if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0) return (error); #ifdef QUOTA (void) chkdq(ip, -extblocks, NOCRED, 0); #endif vinvalbuf(vp, V_ALT, 0, 0); vn_pages_remove(vp, OFF_TO_IDX(lblktosize(fs, -extblocks)), 0); osize = ip->i_din2->di_extsize; ip->i_din2->di_blocks -= extblocks; ip->i_din2->di_extsize = 0; for (i = 0; i < UFS_NXADDR; i++) { oldblks[i] = ip->i_din2->di_extb[i]; ip->i_din2->di_extb[i] = 0; } ip->i_flag |= IN_CHANGE; if ((error = ffs_update(vp, waitforupdate))) return (error); for (i = 0; i < UFS_NXADDR; i++) { if (oldblks[i] == 0) continue; ffs_blkfree(ump, fs, ITODEVVP(ip), oldblks[i], sblksize(fs, osize, i), ip->i_number, vp->v_type, nil); } } } if ((flags & IO_NORMAL) == 0) return (0); if (vp->v_type == VLNK && (ip->i_size < vp->v_mount->mnt_maxsymlinklen || datablocks == 0)) { #ifdef INVARIANTS if (length != 0) panic("ffs_truncate: partial truncate of symlink"); #endif bzero(SHORTLINK(ip), (uint)ip->i_size); ip->i_size = 0; DIP_SET(ip, i_size, 0); ip->i_flag |= IN_CHANGE | IN_UPDATE; if (needextclean) goto extclean; return (ffs_update(vp, waitforupdate)); } if (ip->i_size == length) { ip->i_flag |= IN_CHANGE | IN_UPDATE; if (needextclean) goto extclean; return (ffs_update(vp, 0)); } if (fs->fs_ronly) panic("ffs_truncate: read-only filesystem"); if (IS_SNAPSHOT(ip)) ffs_snapremove(vp); vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 
osize = ip->i_size; /* * Lengthen the size of the file. We must ensure that the * last byte of the file is allocated. Since the smallest * value of osize is 0, length will be at least 1. */ if (osize < length) { vnode_pager_setsize(vp, length); flags |= BA_CLRBUF; error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp); if (error) { vnode_pager_setsize(vp, osize); return (error); } ip->i_size = length; DIP_SET(ip, i_size, length); if (bp->b_bufsize == fs->fs_bsize) bp->b_flags |= B_CLUSTEROK; if (flags & IO_SYNC) bwrite(bp); else if (DOINGASYNC(vp)) bdwrite(bp); else bawrite(bp); ip->i_flag |= IN_CHANGE | IN_UPDATE; return (ffs_update(vp, waitforupdate)); } /* * Lookup block number for a given offset. Zero length files * have no blocks, so return a blkno of -1. */ lbn = lblkno(fs, length - 1); if (length == 0) { blkno = -1; } else if (lbn < UFS_NDADDR) { blkno = DIP(ip, i_db[lbn]); } else { error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn), fs->fs_bsize, cred, BA_METAONLY, &bp); if (error) return (error); indiroff = (lbn - UFS_NDADDR) % NINDIR(fs); if (I_IS_UFS1(ip)) blkno = ((ufs1_daddr_t *)(bp->b_data))[indiroff]; else blkno = ((ufs2_daddr_t *)(bp->b_data))[indiroff]; /* * If the block number is non-zero, then the indirect block * must have been previously allocated and need not be written. * If the block number is zero, then we may have allocated * the indirect block and hence need to write it out. */ if (blkno != 0) brelse(bp); else if (flags & IO_SYNC) bwrite(bp); else bdwrite(bp); } /* * If the block number at the new end of the file is zero, * then we must allocate it to ensure that the last block of * the file is allocated. Soft updates does not handle this * case, so here we have to clean up the soft updates data * structures describing the allocation past the truncation * point. Finding and deallocating those structures is a lot of * work. Since partial truncation with a hole at the end occurs * rarely, we solve the problem by syncing the file so that it * will have no soft updates data structures left. */ if (blkno == 0 && (error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0) return (error); if (blkno != 0 && DOINGSOFTDEP(vp)) { if (softdeptrunc == 0 && journaltrunc == 0) { /* * If soft updates cannot handle this truncation, * clean up soft dependency data structures and * fall through to the synchronous truncation. */ if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0) return (error); } else { flags = IO_NORMAL | (needextclean ? IO_EXT: 0); if (journaltrunc) softdep_journal_freeblocks(ip, cred, length, flags); else softdep_setup_freeblocks(ip, length, flags); ASSERT_VOP_LOCKED(vp, "ffs_truncate1"); if (journaltrunc == 0) { ip->i_flag |= IN_CHANGE | IN_UPDATE; error = ffs_update(vp, 0); } return (error); } } /* * Shorten the size of the file. If the last block of the * shortened file is unallocated, we must allocate it. * Additionally, if the file is not being truncated to a * block boundary, the contents of the partial block * following the end of the file must be zero'ed in * case it ever becomes accessible again because of * subsequent file growth. Directories however are not * zero'ed as they should grow back initialized to empty. 
*/ offset = blkoff(fs, length); if (blkno != 0 && offset == 0) { ip->i_size = length; DIP_SET(ip, i_size, length); } else { lbn = lblkno(fs, length); flags |= BA_CLRBUF; error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp); if (error) return (error); /* * When we are doing soft updates and the UFS_BALLOC * above fills in a direct block hole with a full sized * block that will be truncated down to a fragment below, * we must flush out the block dependency with an FSYNC * so that we do not get a soft updates inconsistency * when we create the fragment below. */ if (DOINGSOFTDEP(vp) && lbn < UFS_NDADDR && fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize && (error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0) return (error); ip->i_size = length; DIP_SET(ip, i_size, length); size = blksize(fs, ip, lbn); if (vp->v_type != VDIR && offset != 0) bzero((char *)bp->b_data + offset, (uint)(size - offset)); /* Kirk's code has reallocbuf(bp, size, 1) here */ allocbuf(bp, size); if (bp->b_bufsize == fs->fs_bsize) bp->b_flags |= B_CLUSTEROK; if (flags & IO_SYNC) bwrite(bp); else if (DOINGASYNC(vp)) bdwrite(bp); else bawrite(bp); } /* * Calculate index into inode's block list of * last direct and indirect blocks (if any) * which we want to keep. Lastblock is -1 when * the file is truncated to 0. */ lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1; lastiblock[SINGLE] = lastblock - UFS_NDADDR; lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs); lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs); nblocks = btodb(fs->fs_bsize); /* * Update file and block pointers on disk before we start freeing * blocks. If we crash before free'ing blocks below, the blocks * will be returned to the free list. lastiblock values are also * normalized to -1 for calls to ffs_indirtrunc below. */ for (level = TRIPLE; level >= SINGLE; level--) { oldblks[UFS_NDADDR + level] = DIP(ip, i_ib[level]); if (lastiblock[level] < 0) { DIP_SET(ip, i_ib[level], 0); lastiblock[level] = -1; } } for (i = 0; i < UFS_NDADDR; i++) { oldblks[i] = DIP(ip, i_db[i]); if (i > lastblock) DIP_SET(ip, i_db[i], 0); } ip->i_flag |= IN_CHANGE | IN_UPDATE; allerror = ffs_update(vp, waitforupdate); /* * Having written the new inode to disk, save its new configuration * and put back the old block pointers long enough to process them. * Note that we save the new block configuration so we can check it * when we are done. */ for (i = 0; i < UFS_NDADDR; i++) { newblks[i] = DIP(ip, i_db[i]); DIP_SET(ip, i_db[i], oldblks[i]); } for (i = 0; i < UFS_NIADDR; i++) { newblks[UFS_NDADDR + i] = DIP(ip, i_ib[i]); DIP_SET(ip, i_ib[i], oldblks[UFS_NDADDR + i]); } ip->i_size = osize; DIP_SET(ip, i_size, osize); error = vtruncbuf(vp, cred, length, fs->fs_bsize); if (error && (allerror == 0)) allerror = error; /* * Indirect blocks first. */ indir_lbn[SINGLE] = -UFS_NDADDR; indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1; indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1; for (level = TRIPLE; level >= SINGLE; level--) { bn = DIP(ip, i_ib[level]); if (bn != 0) { error = ffs_indirtrunc(ip, indir_lbn[level], fsbtodb(fs, bn), lastiblock[level], level, &count); if (error) allerror = error; blocksreleased += count; if (lastiblock[level] < 0) { DIP_SET(ip, i_ib[level], 0); ffs_blkfree(ump, fs, ump->um_devvp, bn, fs->fs_bsize, ip->i_number, vp->v_type, nil); blocksreleased += nblocks; } } if (lastiblock[level] >= 0) goto done; } /* * All whole direct blocks or frags. 
*/ for (i = UFS_NDADDR - 1; i > lastblock; i--) { long bsize; bn = DIP(ip, i_db[i]); if (bn == 0) continue; DIP_SET(ip, i_db[i], 0); bsize = blksize(fs, ip, i); ffs_blkfree(ump, fs, ump->um_devvp, bn, bsize, ip->i_number, vp->v_type, nil); blocksreleased += btodb(bsize); } if (lastblock < 0) goto done; /* * Finally, look for a change in size of the * last direct block; release any frags. */ bn = DIP(ip, i_db[lastblock]); if (bn != 0) { long oldspace, newspace; /* * Calculate amount of space we're giving * back as old block size minus new block size. */ oldspace = blksize(fs, ip, lastblock); ip->i_size = length; DIP_SET(ip, i_size, length); newspace = blksize(fs, ip, lastblock); if (newspace == 0) panic("ffs_truncate: newspace"); if (oldspace - newspace > 0) { /* * Block number of space to be free'd is * the old block # plus the number of frags * required for the storage we're keeping. */ bn += numfrags(fs, newspace); ffs_blkfree(ump, fs, ump->um_devvp, bn, oldspace - newspace, ip->i_number, vp->v_type, nil); blocksreleased += btodb(oldspace - newspace); } } done: #ifdef INVARIANTS for (level = SINGLE; level <= TRIPLE; level++) if (newblks[UFS_NDADDR + level] != DIP(ip, i_ib[level])) panic("ffs_truncate1"); for (i = 0; i < UFS_NDADDR; i++) if (newblks[i] != DIP(ip, i_db[i])) panic("ffs_truncate2"); BO_LOCK(bo); if (length == 0 && (fs->fs_magic != FS_UFS2_MAGIC || ip->i_din2->di_extsize == 0) && (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0)) panic("ffs_truncate3"); BO_UNLOCK(bo); #endif /* INVARIANTS */ /* * Put back the real size. */ ip->i_size = length; DIP_SET(ip, i_size, length); if (DIP(ip, i_blocks) >= blocksreleased) DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - blocksreleased); else /* sanity */ DIP_SET(ip, i_blocks, 0); ip->i_flag |= IN_CHANGE; #ifdef QUOTA (void) chkdq(ip, -blocksreleased, NOCRED, 0); #endif return (allerror); extclean: if (journaltrunc) softdep_journal_freeblocks(ip, cred, length, IO_EXT); else softdep_setup_freeblocks(ip, length, IO_EXT); return (ffs_update(vp, waitforupdate)); #endif // 0 return 0; }
struct fs *
ffs_mkfs(const char *fsys, const fsinfo_t *fsopts, time_t tstamp)
{
	int fragsperinode, optimalfpg, origdensity, minfpg, lastminfpg;
	int32_t cylno, i, csfrags;
	long long sizepb;
	void *space;
	int size, blks;
	int nprintcols, printcolwidth;
	ffs_opt_t *ffs_opts = fsopts->fs_specific;

	Oflag = ffs_opts->version;
	fssize = fsopts->size / fsopts->sectorsize;
	sectorsize = fsopts->sectorsize;
	fsize = ffs_opts->fsize;
	bsize = ffs_opts->bsize;
	maxbsize = ffs_opts->maxbsize;
	maxblkspercg = ffs_opts->maxblkspercg;
	minfree = ffs_opts->minfree;
	opt = ffs_opts->optimization;
	density = ffs_opts->density;
	maxcontig = ffs_opts->maxcontig;
	maxbpg = ffs_opts->maxbpg;
	avgfilesize = ffs_opts->avgfilesize;
	avgfpdir = ffs_opts->avgfpdir;
	bbsize = BBSIZE;
	sbsize = SBLOCKSIZE;

	strlcpy(sblock.fs_volname, ffs_opts->label, sizeof(sblock.fs_volname));

	if (Oflag == 0) {
		sblock.fs_old_inodefmt = FS_42INODEFMT;
		sblock.fs_maxsymlinklen = 0;
		sblock.fs_old_flags = 0;
	} else {
		sblock.fs_old_inodefmt = FS_44INODEFMT;
		sblock.fs_maxsymlinklen = (Oflag == 1 ? MAXSYMLINKLEN_UFS1 :
		    MAXSYMLINKLEN_UFS2);
		sblock.fs_old_flags = FS_FLAGS_UPDATED;
		sblock.fs_flags = 0;
	}
	/*
	 * Validate the given file system size.
	 * Verify that its last block can actually be accessed.
	 * Convert to file system fragment sized units.
	 */
	if (fssize <= 0) {
		printf("preposterous size %lld\n", (long long)fssize);
		exit(13);
	}
	ffs_wtfs(fssize - 1, sectorsize, (char *)&sblock, fsopts);

	/*
	 * collect and verify the filesystem density info
	 */
	sblock.fs_avgfilesize = avgfilesize;
	sblock.fs_avgfpdir = avgfpdir;
	if (sblock.fs_avgfilesize <= 0)
		printf("illegal expected average file size %d\n",
		    sblock.fs_avgfilesize), exit(14);
	if (sblock.fs_avgfpdir <= 0)
		printf("illegal expected number of files per directory %d\n",
		    sblock.fs_avgfpdir), exit(15);

	/*
	 * collect and verify the block and fragment sizes
	 */
	sblock.fs_bsize = bsize;
	sblock.fs_fsize = fsize;
	if (!POWEROF2(sblock.fs_bsize)) {
		printf("block size must be a power of 2, not %d\n",
		    sblock.fs_bsize);
		exit(16);
	}
	if (!POWEROF2(sblock.fs_fsize)) {
		printf("fragment size must be a power of 2, not %d\n",
		    sblock.fs_fsize);
		exit(17);
	}
	if (sblock.fs_fsize < sectorsize) {
		printf("fragment size %d is too small, minimum is %d\n",
		    sblock.fs_fsize, sectorsize);
		exit(18);
	}
	if (sblock.fs_bsize < MINBSIZE) {
		printf("block size %d is too small, minimum is %d\n",
		    sblock.fs_bsize, MINBSIZE);
		exit(19);
	}
	if (sblock.fs_bsize > FFS_MAXBSIZE) {
		printf("block size %d is too large, maximum is %d\n",
		    sblock.fs_bsize, FFS_MAXBSIZE);
		exit(19);
	}
	if (sblock.fs_bsize < sblock.fs_fsize) {
		printf("block size (%d) cannot be smaller than fragment size (%d)\n",
		    sblock.fs_bsize, sblock.fs_fsize);
		exit(20);
	}

	if (maxbsize < bsize || !POWEROF2(maxbsize)) {
		sblock.fs_maxbsize = sblock.fs_bsize;
		printf("Extent size set to %d\n", sblock.fs_maxbsize);
	} else if (sblock.fs_maxbsize > FS_MAXCONTIG * sblock.fs_bsize) {
		sblock.fs_maxbsize = FS_MAXCONTIG * sblock.fs_bsize;
		printf("Extent size reduced to %d\n", sblock.fs_maxbsize);
	} else {
		sblock.fs_maxbsize = maxbsize;
	}
	sblock.fs_maxcontig = maxcontig;
	if (sblock.fs_maxcontig < sblock.fs_maxbsize / sblock.fs_bsize) {
		sblock.fs_maxcontig = sblock.fs_maxbsize / sblock.fs_bsize;
		printf("Maxcontig raised to %d\n", sblock.fs_maxcontig);
	}
	if (sblock.fs_maxcontig > 1)
		sblock.fs_contigsumsize = MIN(sblock.fs_maxcontig, FS_MAXCONTIG);

	sblock.fs_bmask = ~(sblock.fs_bsize - 1);
	sblock.fs_fmask = ~(sblock.fs_fsize - 1);
	sblock.fs_qbmask = ~sblock.fs_bmask;
	sblock.fs_qfmask = ~sblock.fs_fmask;
	for (sblock.fs_bshift = 0, i = sblock.fs_bsize; i > 1; i >>= 1)
		sblock.fs_bshift++;
	for (sblock.fs_fshift = 0, i = sblock.fs_fsize; i > 1; i >>= 1)
		sblock.fs_fshift++;
	sblock.fs_frag = numfrags(&sblock, sblock.fs_bsize);
	for (sblock.fs_fragshift = 0, i = sblock.fs_frag; i > 1; i >>= 1)
		sblock.fs_fragshift++;
	if (sblock.fs_frag > MAXFRAG) {
		printf("fragment size %d is too small, "
		    "minimum with block size %d is %d\n",
		    sblock.fs_fsize, sblock.fs_bsize,
		    sblock.fs_bsize / MAXFRAG);
		exit(21);
	}
	sblock.fs_fsbtodb = ilog2(sblock.fs_fsize / sectorsize);
	sblock.fs_size = sblock.fs_providersize = fssize =
	    dbtofsb(&sblock, fssize);

	if (Oflag <= 1) {
		sblock.fs_magic = FS_UFS1_MAGIC;
		sblock.fs_sblockloc = SBLOCK_UFS1;
		sblock.fs_nindir = sblock.fs_bsize / sizeof(ufs1_daddr_t);
		sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs1_dinode);
		sblock.fs_maxsymlinklen = ((NDADDR + NIADDR) *
		    sizeof (ufs1_daddr_t));
		sblock.fs_old_inodefmt = FS_44INODEFMT;
		sblock.fs_old_cgoffset = 0;
		sblock.fs_old_cgmask = 0xffffffff;
		sblock.fs_old_size = sblock.fs_size;
		sblock.fs_old_rotdelay = 0;
		sblock.fs_old_rps = 60;
		sblock.fs_old_nspf = sblock.fs_fsize / sectorsize;
		sblock.fs_old_cpg = 1;
		sblock.fs_old_interleave = 1;
		sblock.fs_old_trackskew = 0;
		sblock.fs_old_cpc = 0;
		sblock.fs_old_postblformat = 1;
		sblock.fs_old_nrpos = 1;
	} else {
		sblock.fs_magic = FS_UFS2_MAGIC;
		sblock.fs_sblockloc = SBLOCK_UFS2;
		sblock.fs_nindir = sblock.fs_bsize / sizeof(ufs2_daddr_t);
		sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs2_dinode);
		sblock.fs_maxsymlinklen = ((NDADDR + NIADDR) *
		    sizeof (ufs2_daddr_t));
	}

	sblock.fs_sblkno = roundup(
	    howmany(sblock.fs_sblockloc + SBLOCKSIZE, sblock.fs_fsize),
	    sblock.fs_frag);
	sblock.fs_cblkno = (daddr_t)(sblock.fs_sblkno +
	    roundup(howmany(SBLOCKSIZE, sblock.fs_fsize), sblock.fs_frag));
	sblock.fs_iblkno = sblock.fs_cblkno + sblock.fs_frag;
	sblock.fs_maxfilesize = sblock.fs_bsize * NDADDR - 1;
	for (sizepb = sblock.fs_bsize, i = 0; i < NIADDR; i++) {
		sizepb *= NINDIR(&sblock);
		sblock.fs_maxfilesize += sizepb;
	}

	/*
	 * Calculate the number of blocks to put into each cylinder group.
	 *
	 * This algorithm selects the number of blocks per cylinder
	 * group. The first goal is to have at least enough data blocks
	 * in each cylinder group to meet the density requirement. Once
	 * this goal is achieved we try to expand to have at least
	 * 1 cylinder group. Once this goal is achieved, we pack as
	 * many blocks into each cylinder group map as will fit.
	 *
	 * We start by calculating the smallest number of blocks that we
	 * can put into each cylinder group. If this is too big, we reduce
	 * the density until it fits.
	 */
	origdensity = density;
	for (;;) {
		fragsperinode = MAX(numfrags(&sblock, density), 1);
		minfpg = fragsperinode * INOPB(&sblock);
		if (minfpg > sblock.fs_size)
			minfpg = sblock.fs_size;
		sblock.fs_ipg = INOPB(&sblock);
		sblock.fs_fpg = roundup(sblock.fs_iblkno +
		    sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag);
		if (sblock.fs_fpg < minfpg)
			sblock.fs_fpg = minfpg;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
		sblock.fs_fpg = roundup(sblock.fs_iblkno +
		    sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag);
		if (sblock.fs_fpg < minfpg)
			sblock.fs_fpg = minfpg;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
		if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize)
			break;
		density -= sblock.fs_fsize;
	}
	if (density != origdensity)
		printf("density reduced from %d to %d\n",
		    origdensity, density);

	if (maxblkspercg <= 0 || maxblkspercg >= fssize)
		maxblkspercg = fssize - 1;
	/*
	 * Start packing more blocks into the cylinder group until
	 * it cannot grow any larger, the number of cylinder groups
	 * drops below 1, or we reach the size requested.
	 */
	for ( ; sblock.fs_fpg < maxblkspercg;
	    sblock.fs_fpg += sblock.fs_frag) {
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
		if (sblock.fs_size / sblock.fs_fpg < 1)
			break;
		if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize)
			continue;
		if (CGSIZE(&sblock) == (unsigned long)sblock.fs_bsize)
			break;
		sblock.fs_fpg -= sblock.fs_frag;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
		break;
	}
	/*
	 * Check to be sure that the last cylinder group has enough blocks
	 * to be viable. If it is too small, reduce the number of blocks
	 * per cylinder group which will have the effect of moving more
	 * blocks into the last cylinder group.
	 */
	optimalfpg = sblock.fs_fpg;
	for (;;) {
		sblock.fs_ncg = howmany(sblock.fs_size, sblock.fs_fpg);
		lastminfpg = roundup(sblock.fs_iblkno +
		    sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag);
		if (sblock.fs_size < lastminfpg) {
			printf("Filesystem size %lld < minimum size of %d\n",
			    (long long)sblock.fs_size, lastminfpg);
			exit(28);
		}
		if (sblock.fs_size % sblock.fs_fpg >= lastminfpg ||
		    sblock.fs_size % sblock.fs_fpg == 0)
			break;
		sblock.fs_fpg -= sblock.fs_frag;
		sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode),
		    INOPB(&sblock));
	}
	if (optimalfpg != sblock.fs_fpg)
		printf("Reduced frags per cylinder group from %d to %d %s\n",
		    optimalfpg, sblock.fs_fpg, "to enlarge last cyl group");
	sblock.fs_cgsize = fragroundup(&sblock, CGSIZE(&sblock));
	sblock.fs_dblkno = sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock);
	if (Oflag <= 1) {
		sblock.fs_old_spc = sblock.fs_fpg * sblock.fs_old_nspf;
		sblock.fs_old_nsect = sblock.fs_old_spc;
		sblock.fs_old_npsect = sblock.fs_old_spc;
		sblock.fs_old_ncyl = sblock.fs_ncg;
	}

	/*
	 * fill in remaining fields of the super block
	 */
	sblock.fs_csaddr = cgdmin(&sblock, 0);
	sblock.fs_cssize =
	    fragroundup(&sblock, sblock.fs_ncg * sizeof(struct csum));

	/*
	 * Setup memory for temporary in-core cylgroup summaries.
	 * Cribbed from ffs_mountfs().
	 */
	size = sblock.fs_cssize;
	blks = howmany(size, sblock.fs_fsize);
	if (sblock.fs_contigsumsize > 0)
		size += sblock.fs_ncg * sizeof(int32_t);
	if ((space = (char *)calloc(1, size)) == NULL)
		err(1, "memory allocation error for cg summaries");
	sblock.fs_csp = space;
	space = (char *)space + sblock.fs_cssize;
	if (sblock.fs_contigsumsize > 0) {
		int32_t *lp;

		sblock.fs_maxcluster = lp = space;
		for (i = 0; i < sblock.fs_ncg; i++)
			*lp++ = sblock.fs_contigsumsize;
	}

	sblock.fs_sbsize = fragroundup(&sblock, sizeof(struct fs));
	if (sblock.fs_sbsize > SBLOCKSIZE)
		sblock.fs_sbsize = SBLOCKSIZE;
	sblock.fs_minfree = minfree;
	sblock.fs_maxcontig = maxcontig;
	sblock.fs_maxbpg = maxbpg;
	sblock.fs_optim = opt;
	sblock.fs_cgrotor = 0;
	sblock.fs_pendingblocks = 0;
	sblock.fs_pendinginodes = 0;
	sblock.fs_cstotal.cs_ndir = 0;
	sblock.fs_cstotal.cs_nbfree = 0;
	sblock.fs_cstotal.cs_nifree = 0;
	sblock.fs_cstotal.cs_nffree = 0;
	sblock.fs_fmod = 0;
	sblock.fs_ronly = 0;
	sblock.fs_state = 0;
	sblock.fs_clean = FS_ISCLEAN;
	sblock.fs_ronly = 0;
	sblock.fs_id[0] = tstamp;
	sblock.fs_id[1] = random();
	sblock.fs_fsmnt[0] = '\0';
	csfrags = howmany(sblock.fs_cssize, sblock.fs_fsize);
	sblock.fs_dsize = sblock.fs_size - sblock.fs_sblkno -
	    sblock.fs_ncg * (sblock.fs_dblkno - sblock.fs_sblkno);
	sblock.fs_cstotal.cs_nbfree =
	    fragstoblks(&sblock, sblock.fs_dsize) -
	    howmany(csfrags, sblock.fs_frag);
	sblock.fs_cstotal.cs_nffree =
	    fragnum(&sblock, sblock.fs_size) +
	    (fragnum(&sblock, csfrags) > 0 ?
	    sblock.fs_frag - fragnum(&sblock, csfrags) : 0);
	sblock.fs_cstotal.cs_nifree = sblock.fs_ncg * sblock.fs_ipg - ROOTINO;
	sblock.fs_cstotal.cs_ndir = 0;
	sblock.fs_dsize -= csfrags;
	sblock.fs_time = tstamp;
	if (Oflag <= 1) {
		sblock.fs_old_time = tstamp;
		sblock.fs_old_dsize = sblock.fs_dsize;
		sblock.fs_old_csaddr = sblock.fs_csaddr;
		sblock.fs_old_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir;
		sblock.fs_old_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree;
		sblock.fs_old_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree;
		sblock.fs_old_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree;
	}

	/*
	 * Dump out summary information about file system.
	 */
#define	B2MBFACTOR (1 / (1024.0 * 1024.0))
	printf("%s: %.1fMB (%lld sectors) block size %d, fragment size %d\n",
	    fsys, (float)sblock.fs_size * sblock.fs_fsize * B2MBFACTOR,
	    (long long)fsbtodb(&sblock, sblock.fs_size),
	    sblock.fs_bsize, sblock.fs_fsize);
	printf("\tusing %d cylinder groups of %.2fMB, %d blks, %d inodes.\n",
	    sblock.fs_ncg,
	    (float)sblock.fs_fpg * sblock.fs_fsize * B2MBFACTOR,
	    sblock.fs_fpg / sblock.fs_frag, sblock.fs_ipg);
#undef B2MBFACTOR

	/*
	 * Now determine how wide each column will be, and calculate how
	 * many columns will fit in a 76 char line. 76 is the width of the
	 * subwindows in sysinst.
	 */
	printcolwidth = count_digits(
	    fsbtodb(&sblock, cgsblock(&sblock, sblock.fs_ncg - 1)));
	nprintcols = 76 / (printcolwidth + 2);

	/*
	 * allocate space for superblock, cylinder group map, and
	 * two sets of inode blocks.
	 */
	if (sblock.fs_bsize < SBLOCKSIZE)
		iobufsize = SBLOCKSIZE + 3 * sblock.fs_bsize;
	else
		iobufsize = 4 * sblock.fs_bsize;
	if ((iobuf = malloc(iobufsize)) == NULL) {
		printf("Cannot allocate I/O buffer\n");
		exit(38);
	}
	memset(iobuf, 0, iobufsize);

	/*
	 * Make a copy of the superblock into the buffer that we will be
	 * writing out in each cylinder group.
	 */
	memcpy(writebuf, &sblock, sbsize);
	if (fsopts->needswap)
		ffs_sb_swap(&sblock, (struct fs*)writebuf);
	memcpy(iobuf, writebuf, SBLOCKSIZE);

	printf("super-block backups (for fsck -b #) at:");
	for (cylno = 0; cylno < sblock.fs_ncg; cylno++) {
		initcg(cylno, tstamp, fsopts);
		if (cylno % nprintcols == 0)
			printf("\n");
		printf(" %*lld,", printcolwidth,
		    (long long)fsbtodb(&sblock, cgsblock(&sblock, cylno)));
		fflush(stdout);
	}
	printf("\n");

	/*
	 * Now construct the initial file system,
	 * then write out the super-block.
	 */
	sblock.fs_time = tstamp;
	if (Oflag <= 1) {
		sblock.fs_old_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir;
		sblock.fs_old_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree;
		sblock.fs_old_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree;
		sblock.fs_old_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree;
	}
	if (fsopts->needswap)
		sblock.fs_flags |= FS_SWAPPED;
	ffs_write_superblock(&sblock, fsopts);
	return (&sblock);
}
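/*
 * The geometry computed in ffs_mkfs() above follows directly from the block
 * and fragment sizes: fs_bshift and fs_fshift are the base-2 logs of
 * fs_bsize and fs_fsize, fs_frag is their ratio, and fs_maxfilesize is the
 * span of NDADDR direct blocks plus NIADDR levels of indirection, each level
 * multiplying the reach by NINDIR (pointers per indirect block).  The
 * standalone sketch below only illustrates that derivation; it is not part
 * of makefs, and its constants are illustrative assumptions (32 KB blocks,
 * 4 KB frags, 12 direct pointers, 3 indirect levels, 64-bit UFS2-style
 * block pointers).  With these values it prints bshift 15, fshift 12,
 * frag 8, and a maxfilesize a little over 2^51 bytes.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const int64_t bsize = 32768;	/* assumed fs_bsize */
	const int64_t fsize = 4096;	/* assumed fs_fsize */
	const int ndaddr = 12;		/* mirrors NDADDR */
	const int niaddr = 3;		/* mirrors NIADDR */
	const int64_t nindir = bsize / sizeof(int64_t);	/* mirrors NINDIR */
	int bshift = 0, fshift = 0;
	int64_t i, sizepb, maxfilesize;

	/* Same loops ffs_mkfs() uses to derive the shift fields. */
	for (i = bsize; i > 1; i >>= 1)
		bshift++;
	for (i = fsize; i > 1; i >>= 1)
		fshift++;

	/* NDADDR direct blocks, then each indirect level scales by NINDIR. */
	maxfilesize = bsize * ndaddr - 1;
	for (sizepb = bsize, i = 0; i < niaddr; i++) {
		sizepb *= nindir;
		maxfilesize += sizepb;
	}

	printf("bshift %d fshift %d frag %lld maxfilesize %lld\n",
	    bshift, fshift, (long long)(bsize / fsize),
	    (long long)maxfilesize);
	return 0;
}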
int
make_lfs(int devfd, uint secsize, struct dkwedge_info *dkw, int minfree,
    int block_size, int frag_size, int seg_size, int minfreeseg, int resvseg,
    int version, daddr_t start, int ibsize, int interleave, u_int32_t roll_id)
{
	struct ufs1_dinode *dip;	/* Pointer to a disk inode */
	CLEANERINFO *cip;	/* Segment cleaner information table */
	IFILE *ip;		/* Pointer to array of ifile structures */
	IFILE_V1 *ip_v1 = NULL;
	struct lfs *fs;		/* Superblock */
	SEGUSE *segp;		/* Segment usage table */
	daddr_t sb_addr;	/* Address of superblocks */
	daddr_t seg_addr;	/* Address of current segment */
	int bsize;		/* Block size */
	int fsize;		/* Fragment size */
	int db_per_blk;		/* Disk blocks per file block */
	int i, j;
	int sb_interval;	/* number of segs between super blocks */
	int ssize;		/* Segment size */
	double fssize;
	int warned_segtoobig = 0;
	int label_fsb, sb_fsb;
	int curw, ww;
	char tbuf[BUFSIZ];
	struct ubuf *bp;
	struct uvnode *vp, *save_devvp;
	int bb, ubb, dmeta, labelskew;
	u_int64_t tsepb, tnseg;

	/*
	 * Initialize buffer cache.  Use a ballpark guess of the length of
	 * the segment table for the number of hash chains.
	 */
	tnseg = dkw->dkw_size / ((seg_size ? seg_size : DFL_LFSSEG) / secsize);
	tsepb = (block_size ? block_size : DFL_LFSBLOCK) / sizeof(SEGSUM);
	if (tnseg == 0)
		fatal("zero size partition");
	bufinit(tnseg / tsepb);

	/* Initialize LFS subsystem with blank superblock and ifile. */
	fs = lfs_init(devfd, start, (ufs_daddr_t)0, 1, 1/* XXX debug*/);
	save_devvp = fs->lfs_devvp;
	vp = fs->lfs_ivnode;
	*fs = lfs_default;
	fs->lfs_ivnode = vp;
	fs->lfs_devvp = save_devvp;

	/* Set version first of all since it is used to compute other fields */
	fs->lfs_version = version;

	/* Bail out if the partition is not labeled as an LFS partition */
	if (strcmp(dkw->dkw_ptype, DKW_PTYPE_LFS) != 0) {
		fatal("partition label indicated fs type \"%s\", "
		    "expected \"%s\"", dkw->dkw_ptype, DKW_PTYPE_LFS);
	}

	if (!(bsize = block_size))
		bsize = DFL_LFSBLOCK;
	if (!(fsize = frag_size))
		fsize = DFL_LFSFRAG;
	if (!(ssize = seg_size)) {
		ssize = DFL_LFSSEG;
	}
	if (version > 1) {
		if (ibsize == 0)
			ibsize = fsize;
		if (ibsize <= 0 || ibsize % fsize)
			fatal("illegal inode block size: %d\n", ibsize);
	} else if (ibsize && ibsize != bsize)
		fatal("cannot specify inode block size when version == 1\n");

	/* Sanity check: fsize <= bsize < ssize */
	if (fsize > bsize) {
		/* Only complain if fsize was explicitly set */
		if (frag_size)
			fatal("fragment size must be <= block size %d", bsize);
		fsize = bsize;
	}
	if (bsize >= ssize) {
		/* Only fatal if ssize was explicitly set */
		if (seg_size)
			fatal("block size must be < segment size");
		warnx("%s: disklabel segment size (%d) too small, using default (%d)",
		    progname, ssize, DFL_LFSSEG);
		ssize = DFL_LFSSEG;
	}
	if (start < 0 || start >= dkw->dkw_size)
		fatal("filesystem offset %ld out of range", (long)start);
	if (version == 1) {
		if (start)
			warnx("filesystem offset ignored for version 1 filesystem");
		start = LFS_LABELPAD / secsize;
	}

    tryagain:
	/* Modify parts of superblock overridden by command line arguments */
	if (bsize != DFL_LFSBLOCK || fsize != DFL_LFSFRAG) {
		fs->lfs_bshift = lfs_log2(bsize);
		if (1 << fs->lfs_bshift != bsize)
			fatal("%d: block size not a power of 2", bsize);
		fs->lfs_bsize = bsize;
		fs->lfs_fsize = fsize;
		fs->lfs_bmask = bsize - 1;
		fs->lfs_ffmask = fsize - 1;
		fs->lfs_ffshift = lfs_log2(fsize);
		if (1 << fs->lfs_ffshift != fsize)
			fatal("%d: frag size not a power of 2", fsize);
		fs->lfs_frag = numfrags(fs, bsize);
		fs->lfs_fbmask = fs->lfs_frag - 1;
		fs->lfs_fbshift = lfs_log2(fs->lfs_frag);
		fs->lfs_ifpb = bsize / sizeof(IFILE); /* XXX ondisk32 */
		fs->lfs_nindir = bsize / sizeof(int32_t);
	}

	if (fs->lfs_version == 1) {
		fs->lfs_sumsize = LFS_V1_SUMMARY_SIZE;
		fs->lfs_segshift = lfs_log2(ssize);
		if (1 << fs->lfs_segshift != ssize)
			fatal("%d: segment size not power of 2", ssize);
		fs->lfs_segmask = ssize - 1;
		fs->lfs_ifpb = fs->lfs_bsize / sizeof(IFILE_V1);
		fs->lfs_ibsize = fs->lfs_bsize;
		fs->lfs_sepb = bsize / sizeof(SEGUSE_V1);
		fs->lfs_ssize = ssize >> fs->lfs_bshift;
	} else {