/*
 * Verify that the cnt fragments starting at device block blkno fall in
 * the data area of the file system and are currently marked free.
 * Returns 0 when the range may be attached, 1 when it must be rejected
 * (out of range, in a metadata area, or unreadable cylinder group).
 */
int
chkuse(daddr_t blkno, int cnt)
{
	daddr_t frag, cgfrag;
	int grp;

	frag = dbtofsb(fs, blkno);
	if ((unsigned)(frag + cnt) > fs->fs_size) {
		printf("block %ld out of range of file system\n",
		    (long)blkno);
		return (1);
	}

	/* Reject ranges overlapping boot blocks, superblocks or inodes. */
	grp = dtog(fs, frag);
	if (frag < cgdmin(fs, grp)) {
		if (grp == 0 || (frag + cnt) > cgsblock(fs, grp)) {
			printf("block %ld in non-data area: cannot attach\n",
			    (long)blkno);
			return (1);
		}
	} else if ((frag + cnt) > cgbase(fs, grp + 1)) {
		printf("block %ld in non-data area: cannot attach\n",
		    (long)blkno);
		return (1);
	}

	/* Load the cylinder group so we can consult its free map. */
	if (cgread1(&disk, grp) != 1) {
		fprintf(stderr, "cg %d: could not be read\n", grp);
		errs++;
		return (1);
	}
	if (!cg_chkmagic(&acg)) {
		fprintf(stderr, "cg %d: bad magic number\n", grp);
		errs++;
		return (1);
	}

	/* Warn (but do not fail) if the fragment is already allocated. */
	cgfrag = dtogd(fs, frag);
	if (isclr(cg_blksfree(&acg), cgfrag))
		printf("Warning: sector %ld is in use\n", (long)blkno);
	return (0);
}
void pass1(void) { uint_t c, i; daddr32_t cgd; struct inodesc idesc; fsck_ino_t inumber; fsck_ino_t maxinumber; /* * Set file system reserved blocks in used block map. */ for (c = 0; c < sblock.fs_ncg; c++) { cgd = cgdmin(&sblock, c); if (c == 0) { /* * Doing the first cylinder group, account for * the cg summaries as well. */ i = cgbase(&sblock, c); cgd += howmany(sblock.fs_cssize, sblock.fs_fsize); } else { i = cgsblock(&sblock, c); } for (; i < cgd; i++) { note_used(i); } } /* * Note blocks being used by the log, so we don't declare * them as available and some time in the future we get a * freeing free block panic. */ if (islog && islogok && sblock.fs_logbno) examinelog(¬e_used); /* * Find all allocated blocks. This must be completed before * we read the contents of any directories, as dirscan() et al * don't want to know about block allocation holes. So, part * of this pass is to truncate any directories with holes to * just before those holes, so dirscan() can remain blissfully * ignorant. */ inumber = 0; n_files = n_blks = 0; resetinodebuf(); maxinumber = sblock.fs_ncg * sblock.fs_ipg; for (c = 0; c < sblock.fs_ncg; c++) { for (i = 0; i < sblock.fs_ipg; i++, inumber++) { if (inumber < UFSROOTINO) continue; init_inodesc(&idesc); idesc.id_type = ADDR; idesc.id_func = pass1check; verify_inode(inumber, &idesc, maxinumber); } } freeinodebuf(); }
/*
 * Check that a block is a legal block number.
 * Return 0 if in range, 1 if out of range.
 */
int
chkrange(ufs_daddr_t blk, int cnt)
{
	int cg;

	/* Reject non-positive ranges and anything past the last block. */
	if (cnt <= 0 || blk <= 0 || blk > maxfsblock ||
	    cnt - 1 > maxfsblock - blk)
		return (1);

	/* A fragment run may never cross a full-block boundary. */
	if (cnt > sblock.fs_frag ||
	    fragnum(&sblock, blk) + cnt > sblock.fs_frag) {
		if (debug)
			printf("bad size: blk %ld, offset %d, size %d\n",
			    (long)blk, fragnum(&sblock, blk), cnt);
		return (1);
	}

	cg = dtog(&sblock, blk);
	if (blk < cgdmin(&sblock, cg)) {
		/* Below the data area: must not run into the superblock. */
		if ((blk + cnt) > cgsblock(&sblock, cg)) {
			if (debug) {
				printf("blk %ld < cgdmin %ld;",
				    (long)blk, (long)cgdmin(&sblock, cg));
				printf(" blk + cnt %ld > cgsbase %ld\n",
				    (long)(blk + cnt),
				    (long)cgsblock(&sblock, cg));
			}
			return (1);
		}
		return (0);
	}

	/* In the data area: must not spill into the next cylinder group. */
	if ((blk + cnt) > cgbase(&sblock, cg + 1)) {
		if (debug) {
			printf("blk %ld >= cgdmin %ld;",
			    (long)blk, (long)cgdmin(&sblock, cg));
			printf(" blk + cnt %ld > sblock.fs_fpg %ld\n",
			    (long)(blk + cnt), (long)sblock.fs_fpg);
		}
		return (1);
	}
	return (0);
}
void pass1() { ino_t inumber; int c, i, cgd; struct inodesc idesc; /* * Set file system reserved blocks in used block map. */ for (c = 0; c < sblock.fs_ncg; c++) { cgd = cgdmin(&sblock, c); if (c == 0) { i = cgbase(&sblock, c); cgd += howmany(sblock.fs_cssize, sblock.fs_fsize); } else i = cgsblock(&sblock, c); for (; i < cgd; i++) setbmap(i); } /* * Find all allocated blocks. */ memset(&idesc, 0, sizeof(struct inodesc)); idesc.id_type = ADDR; idesc.id_func = pass1check; inumber = 0; n_files = n_blks = 0; resetinodebuf(); for (c = 0; c < sblock.fs_ncg; c++) { for (i = 0; i < sblock.fs_ipg; i++, inumber++) { if (inumber < ROOTINO) continue; checkinode(inumber, &idesc); } } freeinodebuf(); }
ufs2_daddr_t cgballoc(struct uufsd *disk) { u_int8_t *blksfree; struct cg *cgp; struct fs *fs; long bno; fs = &disk->d_fs; cgp = &disk->d_cg; blksfree = cg_blksfree(cgp); for (bno = 0; bno < fs->fs_fpg / fs->fs_frag; bno++) if (ffs_isblock(fs, blksfree, bno)) goto gotit; return (0); gotit: fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--; ffs_clrblock(fs, blksfree, (long)bno); ffs_clusteracct(fs, cgp, bno, -1); cgp->cg_cs.cs_nbfree--; fs->fs_cstotal.cs_nbfree--; fs->fs_fmod = 1; return (cgbase(fs, cgp->cg_cgx) + blkstofrags(fs, bno)); }
/*
 * fsck pass 5: rebuild each cylinder group's used-inode map, free
 * map and summary counts from the statemap/block map accumulated in
 * earlier passes, then compare the rebuilt data with what is on disk
 * and offer (via dofix) to install the corrected versions.
 * Note: cgrp/cgblk/statemap/getbmap are globals maintained elsewhere.
 */
pass5()
{
	int c, blk, frags, sumsize, mapsize;
	daddr_t dbase, dmax, d;
	register long i, j;
	struct csum *cs;
	time_t now;
	struct csum cstotal;
	struct inodesc idesc;
	char buf[MAXBSIZE];
	register struct cg *newcg = (struct cg *)buf;

	bzero((char *)newcg, sblock.fs_cgsize);
	newcg->cg_magic = CG_MAGIC;
	bzero((char *)&idesc, sizeof(struct inodesc));
	idesc.id_type = ADDR;
	bzero((char *)&cstotal, sizeof(struct csum));
	/*
	 * sumsize = bytes of summary data before the inode map;
	 * mapsize = bytes of the inode-used + block-free maps together.
	 */
	sumsize = cgrp.cg_iused - (char *)(&cgrp);
	mapsize = &cgrp.cg_free[howmany(sblock.fs_fpg, NBBY)] -
	    (u_char *)cgrp.cg_iused;
	(void)time(&now);
	for (c = 0; c < sblock.fs_ncg; c++) {
		getblk(&cgblk, cgtod(&sblock, c), sblock.fs_cgsize);
		if (cgrp.cg_magic != CG_MAGIC)
			pfatal("CG %d: BAD MAGIC NUMBER\n", c);
		dbase = cgbase(&sblock, c);
		dmax = dbase + sblock.fs_fpg;
		if (dmax > sblock.fs_size)
			dmax = sblock.fs_size;
		/* Clamp the cg timestamp so it never lies in the future. */
		if (now > cgrp.cg_time)
			newcg->cg_time = cgrp.cg_time;
		else
			newcg->cg_time = now;
		newcg->cg_cgx = c;
		/* The last group may span fewer cylinders than fs_cpg. */
		if (c == sblock.fs_ncg - 1)
			newcg->cg_ncyl = sblock.fs_ncyl % sblock.fs_cpg;
		else
			newcg->cg_ncyl = sblock.fs_cpg;
		newcg->cg_niblk = sblock.fs_ipg;
		newcg->cg_ndblk = dmax - dbase;
		newcg->cg_cs.cs_ndir = 0;
		newcg->cg_cs.cs_nffree = 0;
		newcg->cg_cs.cs_nbfree = 0;
		newcg->cg_cs.cs_nifree = sblock.fs_ipg;
		/* Keep the allocation rotors only when still in range. */
		if (cgrp.cg_rotor < newcg->cg_ndblk)
			newcg->cg_rotor = cgrp.cg_rotor;
		else
			newcg->cg_rotor = 0;
		if (cgrp.cg_frotor < newcg->cg_ndblk)
			newcg->cg_frotor = cgrp.cg_frotor;
		else
			newcg->cg_frotor = 0;
		if (cgrp.cg_irotor < newcg->cg_niblk)
			newcg->cg_irotor = cgrp.cg_irotor;
		else
			newcg->cg_irotor = 0;
		bzero((char *)newcg->cg_frsum, sizeof newcg->cg_frsum);
		bzero((char *)newcg->cg_btot, sizeof newcg->cg_btot);
		bzero((char *)newcg->cg_b, sizeof newcg->cg_b);
		bzero((char *)newcg->cg_free, howmany(sblock.fs_fpg, NBBY));
		bzero((char *)newcg->cg_iused, howmany(sblock.fs_ipg, NBBY));
		/* Rebuild the used-inode map from the inode state map. */
		j = sblock.fs_ipg * c;
		for (i = 0; i < sblock.fs_ipg; j++, i++) {
			switch (statemap[j]) {

			case USTATE:
				break;

			case DSTATE:
			case DCLEAR:
			case DFOUND:
				newcg->cg_cs.cs_ndir++;
				/* fall through */

			case FSTATE:
			case FCLEAR:
				newcg->cg_cs.cs_nifree--;
				setbit(newcg->cg_iused, i);
				break;

			default:
				if (j < ROOTINO)
					break;
				errexit("BAD STATE %d FOR INODE I=%d",
				    statemap[j], j);
			}
		}
		/* Inodes below the root inode are permanently reserved. */
		if (c == 0)
			for (i = 0; i < ROOTINO; i++) {
				setbit(newcg->cg_iused, i);
				newcg->cg_cs.cs_nifree--;
			}
		/* Rebuild the free map, one full block at a time. */
		for (i = 0, d = dbase;
		    d <= dmax - sblock.fs_frag;
		    d += sblock.fs_frag, i += sblock.fs_frag) {
			frags = 0;
			for (j = 0; j < sblock.fs_frag; j++) {
				if (getbmap(d + j))
					continue;
				setbit(newcg->cg_free, i + j);
				frags++;
			}
			if (frags == sblock.fs_frag) {
				/* Whole block free: count it as a block. */
				newcg->cg_cs.cs_nbfree++;
				j = cbtocylno(&sblock, i);
				newcg->cg_btot[j]++;
				newcg->cg_b[j][cbtorpos(&sblock, i)]++;
			} else if (frags > 0) {
				/* Partially free: account the fragments. */
				newcg->cg_cs.cs_nffree += frags;
				blk = blkmap(&sblock, newcg->cg_free, i);
				fragacct(&sblock, blk, newcg->cg_frsum, 1);
			}
		}
		/* Tail fragments past the last whole block in the group. */
		for (frags = d; d < dmax; d++) {
			if (getbmap(d))
				continue;
			setbit(newcg->cg_free, d - dbase);
			newcg->cg_cs.cs_nffree++;
		}
		if (frags != d) {
			blk = blkmap(&sblock, newcg->cg_free,
			    (frags - dbase));
			fragacct(&sblock, blk, newcg->cg_frsum, 1);
		}
		cstotal.cs_nffree += newcg->cg_cs.cs_nffree;
		cstotal.cs_nbfree += newcg->cg_cs.cs_nbfree;
		cstotal.cs_nifree += newcg->cg_cs.cs_nifree;
		cstotal.cs_ndir += newcg->cg_cs.cs_ndir;
		/* Offer to install the rebuilt maps and summary data. */
		if (bcmp(newcg->cg_iused, cgrp.cg_iused, mapsize) != 0 &&
		    dofix(&idesc, "BLK(S) MISSING IN BIT MAPS")) {
			bcopy(newcg->cg_iused, cgrp.cg_iused, mapsize);
			cgdirty();
		}
		if (bcmp((char *)newcg, (char *)&cgrp, sumsize) != 0 &&
		    dofix(&idesc, "SUMMARY INFORMATION BAD")) {
			bcopy((char *)newcg, (char *)&cgrp, sumsize);
			cgdirty();
		}
		cs = &sblock.fs_cs(&sblock, c);
		if (bcmp((char *)&newcg->cg_cs, (char *)cs, sizeof *cs) != 0 &&
		    dofix(&idesc, "FREE BLK COUNT(S) WRONG IN SUPERBLK")) {
			bcopy((char *)&newcg->cg_cs, (char *)cs, sizeof *cs);
			sbdirty();
		}
	}
	/* Finally reconcile the file-system-wide totals. */
	if (bcmp((char *)&cstotal, (char *)&sblock.fs_cstotal,
	    sizeof *cs) != 0 &&
	    dofix(&idesc, "FREE BLK COUNT(S) WRONG IN SUPERBLK")) {
		bcopy((char *)&cstotal, (char *)&sblock.fs_cstotal,
		    sizeof *cs);
		sblock.fs_ronly = 0;
		sblock.fs_fmod = 0;
		sbdirty();
	}
}
void pass1(void) { struct inostat *info; struct inodesc idesc; struct bufarea *cgbp; struct cg *cgp; ino_t inumber, inosused, mininos; ufs2_daddr_t i, cgd; u_int8_t *cp; int c, rebuildcg; badblk = dupblk = lastino = 0; /* * Set file system reserved blocks in used block map. */ for (c = 0; c < sblock.fs_ncg; c++) { cgd = cgdmin(&sblock, c); if (c == 0) { i = cgbase(&sblock, c); } else i = cgsblock(&sblock, c); for (; i < cgd; i++) setbmap(i); } i = sblock.fs_csaddr; cgd = i + howmany(sblock.fs_cssize, sblock.fs_fsize); for (; i < cgd; i++) setbmap(i); /* * Find all allocated blocks. */ memset(&idesc, 0, sizeof(struct inodesc)); idesc.id_func = pass1check; n_files = n_blks = 0; for (c = 0; c < sblock.fs_ncg; c++) { inumber = c * sblock.fs_ipg; setinodebuf(inumber); cgbp = cgget(c); cgp = cgbp->b_un.b_cg; rebuildcg = 0; if (!check_cgmagic(c, cgbp)) rebuildcg = 1; if (!rebuildcg && sblock.fs_magic == FS_UFS2_MAGIC) { inosused = cgp->cg_initediblk; if (inosused > sblock.fs_ipg) { pfatal( "Too many initialized inodes (%ju > %d) in cylinder group %d\nReset to %d\n", (uintmax_t)inosused, sblock.fs_ipg, c, sblock.fs_ipg); inosused = sblock.fs_ipg; } } else { inosused = sblock.fs_ipg; } if (got_siginfo) { printf("%s: phase 1: cyl group %d of %d (%d%%)\n", cdevname, c, sblock.fs_ncg, c * 100 / sblock.fs_ncg); got_siginfo = 0; } if (got_sigalarm) { setproctitle("%s p1 %d%%", cdevname, c * 100 / sblock.fs_ncg); got_sigalarm = 0; } /* * If we are using soft updates, then we can trust the * cylinder group inode allocation maps to tell us which * inodes are allocated. We will scan the used inode map * to find the inodes that are really in use, and then * read only those inodes in from disk. 
*/ if ((preen || inoopt) && usedsoftdep && !rebuildcg) { cp = &cg_inosused(cgp)[(inosused - 1) / CHAR_BIT]; for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) { if (*cp == 0) continue; for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) { if (*cp & i) break; inosused--; } break; } if (inosused < 0) inosused = 0; } /* * Allocate inoinfo structures for the allocated inodes. */ inostathead[c].il_numalloced = inosused; if (inosused == 0) { inostathead[c].il_stat = NULL; continue; } info = Calloc((unsigned)inosused, sizeof(struct inostat)); if (info == NULL) errx(EEXIT, "cannot alloc %u bytes for inoinfo", (unsigned)(sizeof(struct inostat) * inosused)); inostathead[c].il_stat = info; /* * Scan the allocated inodes. */ for (i = 0; i < inosused; i++, inumber++) { if (inumber < ROOTINO) { (void)getnextinode(inumber, rebuildcg); continue; } /* * NULL return indicates probable end of allocated * inodes during cylinder group rebuild attempt. * We always keep trying until we get to the minimum * valid number for this cylinder group. */ if (checkinode(inumber, &idesc, rebuildcg) == 0 && i > cgp->cg_initediblk) break; } /* * This optimization speeds up future runs of fsck * by trimming down the number of inodes in cylinder * groups that formerly had many inodes but now have * fewer in use. 
*/ mininos = roundup(inosused + INOPB(&sblock), INOPB(&sblock)); if (inoopt && !preen && !rebuildcg && sblock.fs_magic == FS_UFS2_MAGIC && cgp->cg_initediblk > 2 * INOPB(&sblock) && mininos < cgp->cg_initediblk) { i = cgp->cg_initediblk; if (mininos < 2 * INOPB(&sblock)) cgp->cg_initediblk = 2 * INOPB(&sblock); else cgp->cg_initediblk = mininos; pwarn("CYLINDER GROUP %d: RESET FROM %ju TO %d %s\n", c, i, cgp->cg_initediblk, "VALID INODES"); dirty(cgbp); } if (inosused < sblock.fs_ipg) continue; lastino += 1; if (lastino < (c * sblock.fs_ipg)) inosused = 0; else inosused = lastino - (c * sblock.fs_ipg); if (rebuildcg && inosused > cgp->cg_initediblk && sblock.fs_magic == FS_UFS2_MAGIC) { cgp->cg_initediblk = roundup(inosused, INOPB(&sblock)); pwarn("CYLINDER GROUP %d: FOUND %d VALID INODES\n", c, cgp->cg_initediblk); } /* * If we were not able to determine in advance which inodes * were in use, then reduce the size of the inoinfo structure * to the size necessary to describe the inodes that we * really found. */ if (inumber == lastino) continue; inostathead[c].il_numalloced = inosused; if (inosused == 0) { free(inostathead[c].il_stat); inostathead[c].il_stat = NULL; continue; } info = Calloc((unsigned)inosused, sizeof(struct inostat)); if (info == NULL) errx(EEXIT, "cannot alloc %u bytes for inoinfo", (unsigned)(sizeof(struct inostat) * inosused)); memmove(info, inostathead[c].il_stat, inosused * sizeof(*info)); free(inostathead[c].il_stat); inostathead[c].il_stat = info; }
/*
 * Initialize a cylinder (block) group: write the superblock/group
 * descriptor backups (where this group carries them), the block and
 * inode bitmaps, and the inode table blocks.
 *
 * Fix: when padding the block bitmap of the (short) last group, the
 * memset length was "e2fs_bpg - i", a BIT count used as a BYTE count,
 * which overruns the one-block bitmap buffer.  The correct byte count
 * is howmany(e2fs_bpg, NBBY) - i / NBBY.
 */
void
initcg(uint cylno)
{
	uint nblcg, i, j, sboff;
	struct ext2fs_dinode *dp;

	/*
	 * Make a copy of the superblock and group descriptors.
	 */
	if (sblock.e2fs.e2fs_rev == E2FS_REV0 ||
	    (sblock.e2fs.e2fs_features_rocompat &
	    EXT2F_ROCOMPAT_SPARSESUPER) == 0 ||
	    cg_has_sb(cylno)) {
		sblock.e2fs.e2fs_block_group_nr = cylno;
		sboff = 0;
		if (cgbase(&sblock, cylno) == 0) {
			/* preserve data in bootblock in cg0 */
			sboff = SBOFF;
		}
		e2fs_sbsave(&sblock.e2fs, (struct ext2fs *)(iobuf + sboff));
		e2fs_cgsave(gd, (struct ext2_gd *)(iobuf +
		    sblock.e2fs_bsize * NBLOCK_SUPERBLOCK),
		    sizeof(struct ext2_gd) * sblock.e2fs_ncg);
		/* write superblock and group descriptor backups */
		wtfs(fsbtodb(&sblock, cgbase(&sblock, cylno)) +
		    sboff / sectorsize, iobufsize - sboff, iobuf + sboff);
	}

	/*
	 * Initialize block bitmap.
	 */
	memset(buf, 0, sblock.e2fs_bsize);
	if (cylno == (sblock.e2fs_ncg - 1)) {
		/* The last group could have less blocks than e2fs_bpg. */
		nblcg = sblock.e2fs.e2fs_bcount -
		    cgbase(&sblock, sblock.e2fs_ncg - 1);
		for (i = nblcg; i < roundup(nblcg, NBBY); i++)
			setbit(buf, i);
		/*
		 * Mark the rest of the bitmap "in use" a whole byte at a
		 * time.  i is a bit index here, so convert both the start
		 * offset and the length to bytes (this previously passed
		 * a bit count as the memset length, overrunning buf).
		 */
		memset(&buf[i / NBBY], ~0U,
		    howmany(sblock.e2fs.e2fs_bpg, NBBY) - i / NBBY);
	}
	/* set overhead (superblock, group descriptor etc.) blocks used */
	for (i = 0; i < cgoverhead(cylno) / NBBY; i++)
		buf[i] = ~0;
	i = i * NBBY;
	for (; i < cgoverhead(cylno); i++)
		setbit(buf, i);
	wtfs(fsbtodb(&sblock, gd[cylno].ext2bgd_b_bitmap),
	    sblock.e2fs_bsize, buf);

	/*
	 * Initialize inode bitmap.
	 *
	 * Assume e2fs_ipg is a multiple of NBBY since
	 * it's a multiple of e2fs_ipb (as we did above).
	 * Note even (possibly smaller) the last group has the same
	 * e2fs_ipg.
	 */
	assert(!(sblock.e2fs.e2fs_ipg % NBBY));
	i = sblock.e2fs.e2fs_ipg / NBBY;
	memset(buf, 0, i);
	assert(sblock.e2fs_bsize >= i);
	/* Bits past the last real inode are permanently "in use". */
	memset(buf + i, ~0U, sblock.e2fs_bsize - i);
	if (cylno == 0) {
		/* mark reserved inodes */
		for (i = 1; i < EXT2_FIRSTINO; i++)
			setbit(buf, EXT2_INO_INDEX(i));
	}
	wtfs(fsbtodb(&sblock, gd[cylno].ext2bgd_i_bitmap),
	    sblock.e2fs_bsize, buf);

	/*
	 * Initialize inode tables.
	 *
	 * Just initialize generation numbers for NFS security.
	 * XXX: sys/ufs/ext2fs/ext2fs_alloc.c:ext2fs_valloc() seems
	 * to override these generated numbers.
	 */
	memset(buf, 0, sblock.e2fs_bsize);
	for (i = 0; i < sblock.e2fs_itpg; i++) {
		for (j = 0; j < sblock.e2fs_ipb; j++) {
			dp = (struct ext2fs_dinode *)(buf + inodesize * j);
			/* h2fs32() just for consistency */
			dp->e2di_gen = h2fs32(arc4random());
		}
		wtfs(fsbtodb(&sblock, gd[cylno].ext2bgd_i_tables + i),
		    sblock.e2fs_bsize, buf);
	}
}
/*
 * Initialize a cylinder group: build the in-core cg structure (acg),
 * mark the free blocks/fragments, seed inode generation numbers, and
 * write the superblock backup, cg map and first inode blocks to disk.
 * Oflag <= 1 selects the old (FFS1/fsck-compatible) cg layout.
 */
void
initcg(int cylno, time_t utime)
{
	int i, j, d, dlower, dupper, blkno, start;
	daddr64_t cbase, dmax;
	struct ufs1_dinode *dp1;
	struct ufs2_dinode *dp2;
	struct csum *cs;

	/*
	 * Determine block bounds for cylinder group.  Allow space for
	 * super block summary information in first cylinder group.
	 */
	cbase = cgbase(&sblock, cylno);
	dmax = cbase + sblock.fs_fpg;
	if (dmax > sblock.fs_size)
		dmax = sblock.fs_size;
	if (fsbtodb(&sblock, cgsblock(&sblock, cylno)) +
	    iobufsize / sectorsize > fssize)
		errx(40, "inode table does not fit in cylinder group");
	/* dlower/dupper: cg-relative bounds of the metadata area. */
	dlower = cgsblock(&sblock, cylno) - cbase;
	dupper = cgdmin(&sblock, cylno) - cbase;
	if (cylno == 0)
		dupper += howmany(sblock.fs_cssize, sblock.fs_fsize);
	cs = &fscs[cylno];
	memset(&acg, 0, sblock.fs_cgsize);
	acg.cg_ffs2_time = utime;
	acg.cg_magic = CG_MAGIC;
	acg.cg_cgx = cylno;
	acg.cg_ffs2_niblk = sblock.fs_ipg;
	acg.cg_initediblk = MIN(sblock.fs_ipg, 2 * INOPB(&sblock));
	acg.cg_ndblk = dmax - cbase;
	start = sizeof(struct cg);
	if (Oflag <= 1) {
		/* Hack to maintain compatibility with old fsck. */
		if (cylno == sblock.fs_ncg - 1)
			acg.cg_ncyl = 0;
		else
			acg.cg_ncyl = sblock.fs_cpg;
		/* Move the FFS2 fields back into their FFS1 slots. */
		acg.cg_time = acg.cg_ffs2_time;
		acg.cg_ffs2_time = 0;
		acg.cg_niblk = acg.cg_ffs2_niblk;
		acg.cg_ffs2_niblk = 0;
		acg.cg_initediblk = 0;
		acg.cg_btotoff = start;
		acg.cg_boff = acg.cg_btotoff +
		    sblock.fs_cpg * sizeof(int32_t);
		acg.cg_iusedoff = acg.cg_boff +
		    sblock.fs_cpg * sizeof(u_int16_t);
	} else {
		acg.cg_iusedoff = start;
	}
	acg.cg_freeoff = acg.cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT);
	acg.cg_nextfreeoff = acg.cg_freeoff +
	    howmany(sblock.fs_fpg, CHAR_BIT);
	if (acg.cg_nextfreeoff > sblock.fs_cgsize)
		errx(37, "panic: cylinder group too big: %d > %d",
		    acg.cg_nextfreeoff, sblock.fs_cgsize);
	acg.cg_cs.cs_nifree += sblock.fs_ipg;
	if (cylno == 0) {
		/* Inodes below the root inode are reserved. */
		for (i = 0; i < ROOTINO; i++) {
			setbit(cg_inosused(&acg), i);
			acg.cg_cs.cs_nifree--;
		}
	}
	if (cylno > 0) {
		/*
		 * In cylno 0, space is reserved for boot and super
		 * blocks.
		 */
		for (d = 0; d < dlower; d += sblock.fs_frag) {
			blkno = d / sblock.fs_frag;
			setblock(&sblock, cg_blksfree(&acg), blkno);
			acg.cg_cs.cs_nbfree++;
			if (Oflag <= 1) {
				cg_blktot(&acg)[cbtocylno(&sblock, d)]++;
				cg_blks(&sblock, &acg,
				    cbtocylno(&sblock, d))
				    [cbtorpos(&sblock, d)]++;
			}
		}
	}
	/* Free the fragments that pad dupper up to a block boundary. */
	if ((i = dupper % sblock.fs_frag)) {
		acg.cg_frsum[sblock.fs_frag - i]++;
		for (d = dupper + sblock.fs_frag - i; dupper < d; dupper++) {
			setbit(cg_blksfree(&acg), dupper);
			acg.cg_cs.cs_nffree++;
		}
	}
	/* Free all whole blocks in the data area. */
	for (d = dupper; d + sblock.fs_frag <= acg.cg_ndblk;
	    d += sblock.fs_frag) {
		blkno = d / sblock.fs_frag;
		setblock(&sblock, cg_blksfree(&acg), blkno);
		acg.cg_cs.cs_nbfree++;
		if (Oflag <= 1) {
			cg_blktot(&acg)[cbtocylno(&sblock, d)]++;
			cg_blks(&sblock, &acg, cbtocylno(&sblock, d))
			    [cbtorpos(&sblock, d)]++;
		}
	}
	/* And any trailing fragments past the last whole block. */
	if (d < acg.cg_ndblk) {
		acg.cg_frsum[acg.cg_ndblk - d]++;
		for (; d < acg.cg_ndblk; d++) {
			setbit(cg_blksfree(&acg), d);
			acg.cg_cs.cs_nffree++;
		}
	}
	*cs = acg.cg_cs;
	/*
	 * Write out the duplicate superblock, the cylinder group map
	 * and two blocks worth of inodes in a single write.
	 */
	start = sblock.fs_bsize > SBLOCKSIZE ?
	    sblock.fs_bsize : SBLOCKSIZE;
	bcopy((char *)&acg, &iobuf[start], sblock.fs_cgsize);
	start += sblock.fs_bsize;
	dp1 = (struct ufs1_dinode *)(&iobuf[start]);
	dp2 = (struct ufs2_dinode *)(&iobuf[start]);
	/* Seed random generation numbers in the first inode blocks. */
	for (i = MIN(sblock.fs_ipg, 2 * INOPB(&sblock)); i != 0; i--) {
		if (sblock.fs_magic == FS_UFS1_MAGIC) {
			dp1->di_gen = (u_int32_t)arc4random();
			dp1++;
		} else {
			dp2->di_gen = (u_int32_t)arc4random();
			dp2++;
		}
	}
	wtfs(fsbtodb(&sblock, cgsblock(&sblock, cylno)), iobufsize, iobuf);
	if (Oflag <= 1) {
		/* Initialize inodes for FFS1. */
		for (i = 2 * sblock.fs_frag;
		    i < sblock.fs_ipg / INOPF(&sblock);
		    i += sblock.fs_frag) {
			dp1 = (struct ufs1_dinode *)(&iobuf[start]);
			for (j = 0; j < INOPB(&sblock); j++) {
				dp1->di_gen = (u_int32_t)arc4random();
				dp1++;
			}
			wtfs(fsbtodb(&sblock,
			    cgimin(&sblock, cylno) + i),
			    sblock.fs_bsize, &iobuf[start]);
		}
	}
}
void pass1(void) { ino_t inumber, inosused, ninosused; size_t inospace; struct inostat *info; int c; struct inodesc idesc; daddr_t i, cgd; u_int8_t *cp; /* * Set file system reserved blocks in used block map. */ for (c = 0; c < sblock.fs_ncg; c++) { cgd = cgdmin(&sblock, c); if (c == 0) i = cgbase(&sblock, c); else i = cgsblock(&sblock, c); for (; i < cgd; i++) setbmap(i); } i = sblock.fs_csaddr; cgd = i + howmany(sblock.fs_cssize, sblock.fs_fsize); for (; i < cgd; i++) setbmap(i); /* * Find all allocated blocks. */ memset(&idesc, 0, sizeof(struct inodesc)); idesc.id_type = ADDR; idesc.id_func = pass1check; n_files = n_blks = 0; info_inumber = 0; info_fn = pass1_info; for (c = 0; c < sblock.fs_ncg; c++) { inumber = c * sblock.fs_ipg; setinodebuf(inumber); getblk(&cgblk, cgtod(&sblock, c), sblock.fs_cgsize); if (sblock.fs_magic == FS_UFS2_MAGIC) { inosused = cgrp.cg_initediblk; if (inosused > sblock.fs_ipg) inosused = sblock.fs_ipg; } else inosused = sblock.fs_ipg; /* * If we are using soft updates, then we can trust the * cylinder group inode allocation maps to tell us which * inodes are allocated. We will scan the used inode map * to find the inodes that are really in use, and then * read only those inodes in from disk. */ if (preen && usedsoftdep) { cp = &cg_inosused(&cgrp)[(inosused - 1) / CHAR_BIT]; for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) { if (*cp == 0) continue; for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) { if (*cp & i) break; inosused--; } break; } if (inosused < 0) inosused = 0; } /* * Allocate inoinfo structures for the allocated inodes. */ inostathead[c].il_numalloced = inosused; if (inosused == 0) { inostathead[c].il_stat = 0; continue; } info = calloc((unsigned)inosused, sizeof(struct inostat)); inospace = (unsigned)inosused * sizeof(struct inostat); if (info == NULL) errexit("cannot alloc %zu bytes for inoinfo", inospace); inostathead[c].il_stat = info; /* * Scan the allocated inodes. 
*/ for (i = 0; i < inosused; i++, inumber++) { info_inumber = inumber; if (inumber < ROOTINO) { (void)getnextinode(inumber); continue; } checkinode(inumber, &idesc); } lastino += 1; if (inosused < sblock.fs_ipg || inumber == lastino) continue; /* * If we were not able to determine in advance which inodes * were in use, then reduce the size of the inoinfo structure * to the size necessary to describe the inodes that we * really found. */ if (lastino < (c * sblock.fs_ipg)) ninosused = 0; else ninosused = lastino - (c * sblock.fs_ipg); inostathead[c].il_numalloced = ninosused; if (ninosused == 0) { free(inostathead[c].il_stat); inostathead[c].il_stat = 0; continue; } if (ninosused != inosused) { struct inostat *ninfo; size_t ninospace; ninfo = reallocarray(info, ninosused, sizeof(*ninfo)); if (ninfo == NULL) { pfatal("too many inodes %llu, or out of memory\n", (unsigned long long)ninosused); exit(8); } ninospace = ninosused * sizeof(*ninfo); if (ninosused > inosused) memset(&ninfo[inosused], 0, ninospace - inospace); inostathead[c].il_stat = ninfo; } }
void mke2fs(const char *fsys, int fi, int fo) { struct timeval tv; int64_t minfssize; uint bcount, fbcount, ficount; uint blocks_gd, blocks_per_cg, inodes_per_cg, iblocks_per_cg; uint minblocks_per_cg, blocks_lastcg; uint ncg, cylno, sboff; uuid_t uuid; uint32_t uustat; int i, len, col, delta, fld_width, max_cols; struct winsize winsize; gettimeofday(&tv, NULL); fsi = fi; fso = fo; /* * collect and verify the block and fragment sizes */ if (!powerof2(bsize)) { errx(EXIT_FAILURE, "block size must be a power of 2, not %u\n", bsize); } if (!powerof2(fsize)) { errx(EXIT_FAILURE, "fragment size must be a power of 2, not %u\n", fsize); } if (fsize < sectorsize) { errx(EXIT_FAILURE, "fragment size %u is too small, minimum is %u\n", fsize, sectorsize); } if (bsize < MINBSIZE) { errx(EXIT_FAILURE, "block size %u is too small, minimum is %u\n", bsize, MINBSIZE); } if (bsize > EXT2_MAXBSIZE) { errx(EXIT_FAILURE, "block size %u is too large, maximum is %u\n", bsize, MAXBSIZE); } if (bsize != fsize) { /* * There is no fragment support on current ext2fs (yet?), * but some kernel code refers fsize or fpg as bsize or bpg * and Linux seems to set the same values to them. */ errx(EXIT_FAILURE, "block size (%u) can't be different from " "fragment size (%u)\n", bsize, fsize); } /* variable inodesize is REV1 feature */ if (Oflag == 0 && inodesize != EXT2_REV0_DINODE_SIZE) { errx(EXIT_FAILURE, "GOOD_OLD_REV file system format" " doesn't support %d byte inode\n", inodesize); } sblock.e2fs.e2fs_log_bsize = ilog2(bsize) - LOG_MINBSIZE; /* Umm, why not e2fs_log_fsize? 
*/ sblock.e2fs.e2fs_fsize = ilog2(fsize) - LOG_MINBSIZE; sblock.e2fs_bsize = bsize; sblock.e2fs_bshift = sblock.e2fs.e2fs_log_bsize + LOG_MINBSIZE; sblock.e2fs_qbmask = sblock.e2fs_bsize - 1; sblock.e2fs_bmask = ~sblock.e2fs_qbmask; sblock.e2fs_fsbtodb = ilog2(sblock.e2fs_bsize) - ilog2(sectorsize); sblock.e2fs_ipb = sblock.e2fs_bsize / inodesize; /* * Ext2fs preserves BBSIZE (1024 bytes) space at the top for * bootloader (though it is not enough at all for our bootloader). * If bsize == BBSIZE we have to preserve one block. * If bsize > BBSIZE, the first block already contains BBSIZE space * before superblock because superblock is allocated at SBOFF and * bsize is a power of two (i.e. 2048 bytes or more). */ sblock.e2fs.e2fs_first_dblock = (sblock.e2fs_bsize > BBSIZE) ? 0 : 1; minfssize = fsbtodb(&sblock, sblock.e2fs.e2fs_first_dblock + NBLOCK_SUPERBLOCK + 1 /* at least one group descriptor */ + NBLOCK_BLOCK_BITMAP + NBLOCK_INODE_BITMAP + 1 /* at least one inode table block */ + 1 /* at least one data block for rootdir */ + 1 /* at least one data block for data */ ); /* XXX and more? */ if (fssize < minfssize) errx(EXIT_FAILURE, "Filesystem size %" PRId64 " < minimum size of %" PRId64 "\n", fssize, minfssize); bcount = dbtofsb(&sblock, fssize); /* * While many people claim that ext2fs is a (bad) clone of ufs/ffs, * it isn't actual ffs so maybe we should call it "block group" * as their native name rather than ffs derived "cylinder group." * But we'll use the latter here since other kernel sources use it. 
* (I also agree "cylinder" based allocation is obsolete though) */ /* maybe "simple is the best" */ blocks_per_cg = sblock.e2fs_bsize * NBBY; ncg = howmany(bcount - sblock.e2fs.e2fs_first_dblock, blocks_per_cg); blocks_gd = howmany(sizeof(struct ext2_gd) * ncg, bsize); /* check range of inode number */ if (num_inodes < EXT2_FIRSTINO) num_inodes = EXT2_FIRSTINO; /* needs reserved inodes + 1 */ if (num_inodes > UINT16_MAX * ncg) num_inodes = UINT16_MAX * ncg; /* ext2bgd_nifree is uint16_t */ inodes_per_cg = num_inodes / ncg; iblocks_per_cg = howmany(inodesize * inodes_per_cg, bsize); /* Check that the last cylinder group has enough space for inodes */ minblocks_per_cg = NBLOCK_BLOCK_BITMAP + NBLOCK_INODE_BITMAP + iblocks_per_cg + 1; /* at least one data block */ if (Oflag == 0 || cg_has_sb(ncg - 1) != 0) minblocks_per_cg += NBLOCK_SUPERBLOCK + blocks_gd; blocks_lastcg = bcount - sblock.e2fs.e2fs_first_dblock - blocks_per_cg * (ncg - 1); if (blocks_lastcg < minblocks_per_cg) { /* * Since we make all the cylinder groups the same size, the * last will only be small if there are more than one * cylinder groups. If the last one is too small to store * filesystem data, just kill it. * * XXX: Does fsck_ext2fs(8) properly handle this case? 
*/ bcount -= blocks_lastcg; ncg--; blocks_lastcg = blocks_per_cg; blocks_gd = howmany(sizeof(struct ext2_gd) * ncg, bsize); inodes_per_cg = num_inodes / ncg; } /* roundup inodes_per_cg to make it use whole inode table blocks */ inodes_per_cg = roundup(inodes_per_cg, sblock.e2fs_ipb); num_inodes = inodes_per_cg * ncg; iblocks_per_cg = inodes_per_cg / sblock.e2fs_ipb; /* XXX: probably we should check these adjusted values again */ sblock.e2fs.e2fs_bcount = bcount; sblock.e2fs.e2fs_icount = num_inodes; sblock.e2fs_ncg = ncg; sblock.e2fs_ngdb = blocks_gd; sblock.e2fs_itpg = iblocks_per_cg; sblock.e2fs.e2fs_rbcount = sblock.e2fs.e2fs_bcount * minfree / 100; /* e2fs_fbcount will be accounted later */ /* e2fs_ficount will be accounted later */ sblock.e2fs.e2fs_bpg = blocks_per_cg; sblock.e2fs.e2fs_fpg = blocks_per_cg; sblock.e2fs.e2fs_ipg = inodes_per_cg; sblock.e2fs.e2fs_mtime = 0; sblock.e2fs.e2fs_wtime = tv.tv_sec; sblock.e2fs.e2fs_mnt_count = 0; /* XXX: should add some entropy to avoid checking all fs at once? */ sblock.e2fs.e2fs_max_mnt_count = EXT2_DEF_MAX_MNT_COUNT; sblock.e2fs.e2fs_magic = E2FS_MAGIC; sblock.e2fs.e2fs_state = E2FS_ISCLEAN; sblock.e2fs.e2fs_beh = E2FS_BEH_DEFAULT; sblock.e2fs.e2fs_minrev = 0; sblock.e2fs.e2fs_lastfsck = tv.tv_sec; sblock.e2fs.e2fs_fsckintv = EXT2_DEF_FSCKINTV; /* * Maybe we can use E2FS_OS_FREEBSD here and it would be more proper, * but the purpose of this newfs_ext2fs(8) command is to provide * a filesystem which can be recognized by firmware on some * Linux based appliances that can load bootstrap files only from * (their native) ext2fs, and anyway we will (and should) try to * act like them as much as possible. * * Anyway, I hope that all newer such boxes will keep their support * for the "GOOD_OLD_REV" ext2fs. 
*/ sblock.e2fs.e2fs_creator = E2FS_OS_LINUX; if (Oflag == 0) { sblock.e2fs.e2fs_rev = E2FS_REV0; sblock.e2fs.e2fs_features_compat = 0; sblock.e2fs.e2fs_features_incompat = 0; sblock.e2fs.e2fs_features_rocompat = 0; } else { sblock.e2fs.e2fs_rev = E2FS_REV1; /* * e2fsprogs say "REV1" is "dynamic" so * it isn't quite a version and maybe it means * "extended from REV0 so check compat features." * * XXX: We don't have any native tool to activate * the EXT2F_COMPAT_RESIZE feature and * fsck_ext2fs(8) might not fix structures for it. */ sblock.e2fs.e2fs_features_compat = EXT2F_COMPAT_RESIZE; sblock.e2fs.e2fs_features_incompat = EXT2F_INCOMPAT_FTYPE; sblock.e2fs.e2fs_features_rocompat = EXT2F_ROCOMPAT_SPARSESUPER | EXT2F_ROCOMPAT_LARGEFILE; } sblock.e2fs.e2fs_ruid = geteuid(); sblock.e2fs.e2fs_rgid = getegid(); sblock.e2fs.e2fs_first_ino = EXT2_FIRSTINO; sblock.e2fs.e2fs_inode_size = inodesize; /* e2fs_block_group_nr is set on writing superblock to each group */ uuid_create(&uuid, &uustat); if (uustat != uuid_s_ok) errx(EXIT_FAILURE, "Failed to generate uuid\n"); uuid_enc_le(sblock.e2fs.e2fs_uuid, &uuid); if (volname != NULL) { if (strlen(volname) > sizeof(sblock.e2fs.e2fs_vname)) errx(EXIT_FAILURE, "Volume name is too long"); strlcpy(sblock.e2fs.e2fs_vname, volname, sizeof(sblock.e2fs.e2fs_vname)); } sblock.e2fs.e2fs_fsmnt[0] = '\0'; sblock.e2fs_fsmnt[0] = '\0'; sblock.e2fs.e2fs_algo = 0; /* XXX unsupported? */ sblock.e2fs.e2fs_prealloc = 0; /* XXX unsupported? */ sblock.e2fs.e2fs_dir_prealloc = 0; /* XXX unsupported? 
*/ /* calculate blocks for reserved group descriptors for resize */ sblock.e2fs.e2fs_reserved_ngdb = 0; if (sblock.e2fs.e2fs_rev > E2FS_REV0 && (sblock.e2fs.e2fs_features_compat & EXT2F_COMPAT_RESIZE) != 0) { uint64_t target_blocks; uint target_ncg, target_ngdb, reserved_ngdb; /* reserve descriptors for size as 1024 times as current */ target_blocks = (sblock.e2fs.e2fs_bcount - sblock.e2fs.e2fs_first_dblock) * 1024ULL; /* number of blocks must be in uint32_t */ if (target_blocks > UINT32_MAX) target_blocks = UINT32_MAX; target_ncg = howmany(target_blocks, sblock.e2fs.e2fs_bpg); target_ngdb = howmany(sizeof(struct ext2_gd) * target_ncg, sblock.e2fs_bsize); /* * Reserved group descriptor blocks are preserved as * the second level double indirect reference blocks in * the EXT2_RESIZEINO inode, so the maximum number of * the blocks is NINDIR(fs). * (see also descriptions in init_resizeino() function) * * We check a number including current e2fs_ngdb here * because they will be moved into reserved gdb on * possible future size shrink, though e2fsprogs don't * seem to care about it. 
*/ if (target_ngdb > NINDIR(&sblock)) target_ngdb = NINDIR(&sblock); reserved_ngdb = target_ngdb - sblock.e2fs_ngdb; /* make sure reserved_ngdb fits in the last cg */ if (reserved_ngdb >= blocks_lastcg - cgoverhead(ncg - 1)) reserved_ngdb = blocks_lastcg - cgoverhead(ncg - 1); if (reserved_ngdb == 0) { /* if no space for reserved gdb, disable the feature */ sblock.e2fs.e2fs_features_compat &= ~EXT2F_COMPAT_RESIZE; } sblock.e2fs.e2fs_reserved_ngdb = reserved_ngdb; } /* * Initialize group descriptors */ gd = malloc(sblock.e2fs_ngdb * bsize); if (gd == NULL) errx(EXIT_FAILURE, "Can't allocate descriptors buffer"); memset(gd, 0, sblock.e2fs_ngdb * bsize); fbcount = 0; ficount = 0; for (cylno = 0; cylno < ncg; cylno++) { uint boffset; boffset = cgbase(&sblock, cylno); if (sblock.e2fs.e2fs_rev == E2FS_REV0 || (sblock.e2fs.e2fs_features_rocompat & EXT2F_ROCOMPAT_SPARSESUPER) == 0 || cg_has_sb(cylno)) { boffset += NBLOCK_SUPERBLOCK + sblock.e2fs_ngdb; if (sblock.e2fs.e2fs_rev > E2FS_REV0 && (sblock.e2fs.e2fs_features_compat & EXT2F_COMPAT_RESIZE) != 0) boffset += sblock.e2fs.e2fs_reserved_ngdb; } gd[cylno].ext2bgd_b_bitmap = boffset; boffset += NBLOCK_BLOCK_BITMAP; gd[cylno].ext2bgd_i_bitmap = boffset; boffset += NBLOCK_INODE_BITMAP; gd[cylno].ext2bgd_i_tables = boffset; if (cylno == (ncg - 1)) gd[cylno].ext2bgd_nbfree = blocks_lastcg - cgoverhead(cylno); else gd[cylno].ext2bgd_nbfree = sblock.e2fs.e2fs_bpg - cgoverhead(cylno); fbcount += gd[cylno].ext2bgd_nbfree; gd[cylno].ext2bgd_nifree = sblock.e2fs.e2fs_ipg; if (cylno == 0) { /* take reserved inodes off nifree */ gd[cylno].ext2bgd_nifree -= EXT2_RESERVED_INODES; } ficount += gd[cylno].ext2bgd_nifree; gd[cylno].ext2bgd_ndirs = 0; } sblock.e2fs.e2fs_fbcount = fbcount; sblock.e2fs.e2fs_ficount = ficount; /* * Dump out summary information about file system. 
*/ if (verbosity > 0) { printf("%s: %u.%1uMB (%" PRId64 " sectors) " "block size %u, fragment size %u\n", fsys, (uint)(((uint64_t)bcount * bsize) / (1024 * 1024)), (uint)((uint64_t)bcount * bsize - rounddown((uint64_t)bcount * bsize, 1024 * 1024)) / 1024 / 100, fssize, bsize, fsize); printf("\tusing %u block groups of %u.0MB, %u blks, " "%u inodes.\n", ncg, bsize * sblock.e2fs.e2fs_bpg / (1024 * 1024), sblock.e2fs.e2fs_bpg, sblock.e2fs.e2fs_ipg); } /* * allocate space for superblock and group descriptors */ iobufsize = (NBLOCK_SUPERBLOCK + sblock.e2fs_ngdb) * sblock.e2fs_bsize; iobuf = mmap(0, iobufsize, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0); if (iobuf == NULL) errx(EXIT_FAILURE, "Cannot allocate I/O buffer\n"); memset(iobuf, 0, iobufsize); /* * We now start writing to the filesystem */ if (!Nflag) { static const uint pbsize[] = { 1024, 2048, 4096, 0 }; uint pblock, epblock; /* * Validate the given file system size. * Verify that its last block can actually be accessed. * Convert to file system fragment sized units. */ if (fssize <= 0) errx(EXIT_FAILURE, "Preposterous size %" PRId64 "\n", fssize); wtfs(fssize - 1, sectorsize, iobuf); /* * Ensure there is nothing that looks like a filesystem * superblock anywhere other than where ours will be. * If fsck_ext2fs finds the wrong one all hell breaks loose! * * XXX: needs to check how fsck_ext2fs programs even * on other OSes determine alternate superblocks */ for (i = 0; pbsize[i] != 0; i++) { epblock = (uint64_t)bcount * bsize / pbsize[i]; for (pblock = ((pbsize[i] == SBSIZE) ? 1 : 0); pblock < epblock; pblock += pbsize[i] * NBBY /* bpg */) zap_old_sblock((daddr_t)pblock * pbsize[i] / sectorsize); } } if (verbosity >= 3) printf("super-block backups (for fsck_ext2fs -b #) at:\n"); /* If we are printing more than one line of numbers, line up columns */ fld_width = verbosity < 4 ? 
1 : snprintf(NULL, 0, "%" PRIu64, (uint64_t)cgbase(&sblock, ncg - 1)); /* Get terminal width */ if (ioctl(fileno(stdout), TIOCGWINSZ, &winsize) == 0) max_cols = winsize.ws_col; else max_cols = 80; if (Nflag && verbosity == 3) /* Leave space to add " ..." after one row of numbers */ max_cols -= 4; #define BASE 0x10000 /* For some fixed-point maths */ col = 0; delta = verbosity > 2 ? 0 : max_cols * BASE / ncg; for (cylno = 0; cylno < ncg; cylno++) { fflush(stdout); initcg(cylno); if (verbosity < 2) continue; /* the first one is a master, not backup */ if (cylno == 0) continue; /* skip if this cylinder doesn't have a backup */ if (sblock.e2fs.e2fs_rev > E2FS_REV0 && (sblock.e2fs.e2fs_features_rocompat & EXT2F_ROCOMPAT_SPARSESUPER) != 0 && cg_has_sb(cylno) == 0) continue; if (delta > 0) { if (Nflag) /* No point doing dots for -N */ break; /* Print dots scaled to end near RH margin */ for (col += delta; col > BASE; col -= BASE) printf("."); continue; } /* Print superblock numbers */ len = printf(" %*" PRIu64 "," + !col, fld_width, (uint64_t)cgbase(&sblock, cylno)); col += len; if (col + len < max_cols) /* Next number fits */ continue; /* Next number won't fit, need a newline */ if (verbosity <= 3) { /* Print dots for subsequent cylinder groups */ delta = sblock.e2fs_ncg - cylno - 1; if (delta != 0) { if (Nflag) { printf(" ..."); break; } delta = max_cols * BASE / delta; } } col = 0; printf("\n"); } #undef BASE if (col > 0) printf("\n"); if (Nflag) return; /* * Now construct the initial file system, */ if (fsinit(&tv) == 0) errx(EXIT_FAILURE, "Error making filesystem"); /* * Write out the superblock and group descriptors */ sblock.e2fs.e2fs_block_group_nr = 0; sboff = 0; if (cgbase(&sblock, 0) == 0) { /* * If the first block contains the boot block sectors, * (i.e. in case of sblock.e2fs.e2fs_bsize > BBSIZE) * we have to preserve data in it. 
*/ sboff = SBOFF; } e2fs_sbsave(&sblock.e2fs, (struct ext2fs *)(iobuf + sboff)); e2fs_cgsave(gd, (struct ext2_gd *)(iobuf + sblock.e2fs_bsize), sizeof(struct ext2_gd) * sblock.e2fs_ncg); wtfs(fsbtodb(&sblock, cgbase(&sblock, 0)) + sboff / sectorsize, iobufsize - sboff, iobuf + sboff); munmap(iobuf, iobufsize); }
/*
 * Scan one BSD FFS cylinder group and record its free space.
 *
 * Walks the cg's fragment free bitmap (cg_blksfree) and hands every run
 * of free fragments to addskip() as disk-sector ranges, accumulating the
 * global freecount.  With DO_INODES, also processes the inode-used map:
 * free inode blocks are registered via addfixupfunc()/addskip() so the
 * image can omit or sanitize them.
 *
 * fsp    - superblock of the filesystem being scanned
 * cgp    - in-memory copy of this cylinder group
 * cg     - cylinder group number (used to compute absolute inode numbers)
 * offset - sector offset of the filesystem on the disk, added to every
 *          block address reported
 * Returns 0 (always; failures trip assert()s).
 */
static int
read_bsdcg(struct fs *fsp, struct cg *cgp, int cg, u_int32_t offset)
{
	int i, max;
	u_int8_t *p;
	int count, j;
	unsigned long dboff, dbcount, dbstart;

	max = fsp->fs_fpg;
	p = cg_blksfree(cgp);
	/* paranoia: make sure we stay in the buffer */
	assert(&p[max/NBBY] <= (u_int8_t *)cgp + fsp->fs_cgsize);
	/*
	 * XXX The bitmap is fragments, not FS blocks.
	 *
	 * The block bitmap lists blocks relative to the base (cgbase()) of
	 * the cylinder group. cgdmin() is the first actual datablock, but
	 * the bitmap includes all the blocks used for all the blocks
	 * comprising the cg. These include the superblock, cg, inodes,
	 * datablocks and the variable-sized padding before all of these
	 * (used to skew the offset of consecutive cgs).
	 * The "dbstart" parameter is thus the beginning of the cg, to which
	 * we add the bitmap offset. All blocks before cgdmin() will always
	 * be allocated, but we scan them anyway.
	 */
	//assert(cgbase(fsp, cg) == cgstart(fsp, cg));
	dbstart = fsbtodb(fsp, cgbase(fsp, cg)) + offset;

	if (debug > 2)
		fprintf(stderr, " ");
	/*
	 * Coalesce consecutive set (free) bits into one run per addskip()
	 * call; i is advanced inside the loop to the end of each run.
	 */
	for (count = i = 0; i < max; i++)
		if (isset(p, i)) {
			j = i;
			while ((i+1)<max && isset(p, i+1))
				i++;
			dboff = dbstart + fsbtodb(fsp, j);
			dbcount = fsbtodb(fsp, (i-j) + 1);
			freecount += (i-j) + 1;
			if (debug > 2) {
				if (count)
					fprintf(stderr, ",%s",
					    count % 4 ? " " : "\n ");
				fprintf(stderr, "%lu:%ld", dboff, dbcount);
			}
			addskip(dboff, dbcount);
			count++;
		}
	if (debug > 2)
		fprintf(stderr, "\n");
#ifdef DO_INODES
	/*
	 * Look for free inodes
	 */
	if (cgp->cg_cs.cs_nifree != 0) {
		int tifree = 0;		/* free inodes found; checked against cs_nifree */
		unsigned long edboff;
		int ino;

		p = cg_inosused(cgp);
		max = fsp->fs_ipg;
		assert(&p[max/NBBY] <= (u_int8_t *)cgp + fsp->fs_cgsize);

		/*
		 * For UFS2, (cylinder-group relative) inode numbers beyond
		 * initediblk are uninitialized. We do not process those
		 * now. They are treated as regular free blocks below.
		 */
		if (fsp->fs_magic == FS_UFS2_MAGIC) {
			assert(cgp->cg_initediblk > 0);
			assert(cgp->cg_initediblk <= fsp->fs_ipg);
			assert((cgp->cg_initediblk % INOPB(fsp)) == 0);
			max = cgp->cg_initediblk;
		}
		ino = cg * fsp->fs_ipg;	/* first absolute inode number in this cg */
#ifdef CLEAR_FREE_INODES
		if (metaoptimize) {
			static uint32_t ufs1_magic = FS_UFS1_MAGIC;
			static uint32_t ufs2_magic = FS_UFS2_MAGIC;
			uint32_t *magic;

			if (debug > 1)
				fprintf(stderr, " \t ifree %9d\n",
				    cgp->cg_cs.cs_nifree);
			if (debug > 2)
				fprintf(stderr, " ");

			magic = (fsp->fs_magic == FS_UFS2_MAGIC) ?
			    &ufs2_magic : &ufs1_magic;
			/*
			 * Find runs of clear (free) bits in the inode-used
			 * map.  Only runs that begin on an inode-block
			 * boundary (ino_to_fsbo() == 0) are registered as
			 * fixups; others just bump tifree.
			 */
			for (count = i = 0; i < max; i++) {
				if (isset(p, i)) {
					continue;
				}
				if (ino_to_fsbo(fsp, ino+i) == 0) {
					j = i;
					while ((i+1) < max && !isset(p, i+1))
						i++;
					dboff = fsbtodb(fsp,
					    ino_to_fsba(fsp, ino+j));
					edboff = fsbtodb(fsp,
					    ino_to_fsba(fsp, ino+i));
#if 0
					fprintf(stderr,
					    " found free inodes %d-%d"
					    " db %lu.%u to %lu.%u\n",
					    ino+j, ino+i,
					    dboff+offset,
					    ino_to_fsbo(fsp, ino+j),
					    edboff+offset,
					    ino_to_fsbo(fsp, ino+i));
#endif
					tifree += (i+1 - j);
					/*
					 * Sector span between first and last
					 * free inode block; the final partial
					 * block is only counted when the run
					 * reaches the end of the map.
					 */
					dbcount = edboff - dboff;
					if ((i+1) == max)
						dbcount++;
					if (dbcount == 0)
						continue;
					/*
					 * NOTE(review): sizeof(magic) is the
					 * size of the pointer, not of the
					 * 32-bit magic value; presumably
					 * sizeof(*magic) was intended —
					 * confirm against addfixupfunc()'s
					 * contract.
					 */
					addfixupfunc(inodefixup,
					    sectobytes(dboff+offset),
					    sectobytes(offset),
					    sectobytes(dbcount),
					    magic, sizeof(magic),
					    RELOC_NONE);
					if (debug > 2) {
						if (count)
							fprintf(stderr, ",%s",
							    count % 4 ?
							    " " : "\n ");
						fprintf(stderr, "%lu:%ld",
						    dboff+offset, dbcount);
					}
					count++;
				} else
					tifree++;
			}
			assert(i == max);
			if (debug > 2)
				fprintf(stderr, "\n");
		}
#endif
		/*
		 * For UFS2, deal with uninitialized inodes.
		 * These are sweet, we just add them to the skip list.
		 */
		if (fsp->fs_magic == FS_UFS2_MAGIC && max < fsp->fs_ipg) {
			i = max;
			if (debug > 1)
				fprintf(stderr, " \t uninit %9d\n",
				    fsp->fs_ipg - i);
			if (debug > 2)
				fprintf(stderr, " ");
			max = fsp->fs_ipg;
#if 1
			/*
			 * Paranoia!
			 */
			/* every inode past initediblk must be unused */
			j = i;
			while ((j+1) < max) {
				assert(!isset(p, j+1));
				j++;
			}
#endif
			tifree += (max - i);
			dboff = fsbtodb(fsp, ino_to_fsba(fsp, ino+i));
			edboff = fsbtodb(fsp, ino_to_fsba(fsp, ino+max-1));
			dbcount = edboff - dboff + 1;
			if (debug > 2)
				fprintf(stderr, "%lu:%ld",
				    dboff+offset, dbcount);
			addskip(dboff+offset, dbcount);
			if (debug > 2)
				fprintf(stderr, "\n");
		}
#ifdef CLEAR_FREE_INODES
		/* sanity: our count should match the cg summary */
		if (metaoptimize && tifree != cgp->cg_cs.cs_nifree)
			fprintf(stderr, "Uh-oh! found %d free inodes, "
			    "shoulda found %d\n",
			    tifree, cgp->cg_cs.cs_nifree);
#endif
	}
#endif
	return 0;
}
/*
 * Pass 5 (4.3BSD-era fsck): rebuild each cylinder group's bookkeeping
 * from the inode state map (statemap[]) and block usage map built by the
 * earlier passes, then compare against what is on disk.  Discrepancies
 * in the per-cg summary (cg_cs), the inode/block bitmaps, or the
 * superblock totals (fs_cstotal) are reported and, with the operator's
 * consent via dofix(), written back.
 *
 * Uses file-level state: sblock, cgrp, cgblk, statemap, and the
 * getbmap/setbmap block-usage bitmap.  No arguments, no return value;
 * fatal inconsistencies exit through errexit()/pfatal().
 */
pass5()
{
	int c, blk, frags, basesize, sumsize, mapsize, savednrpos;
	register struct fs *fs = &sblock;
	register struct cg *cg = &cgrp;
	daddr_t dbase, dmax;
	register daddr_t d;
	register long i, j;
	struct csum *cs;
	time_t now;
	struct csum cstotal;
	struct inodesc idesc;
	char buf[MAXBSIZE];
	register struct cg *newcg = (struct cg *)buf;
	struct ocg *ocg = (struct ocg *)buf;

	/* Template cg, rebuilt from scratch for each group below. */
	bzero((char *)newcg, fs->fs_cgsize);
	newcg->cg_niblk = fs->fs_ipg;
	/*
	 * Compute the sizes of the three comparable regions of a cg:
	 * basesize (header), sumsize (rotational summaries), mapsize
	 * (inode + block bitmaps).  Layout differs by postblock format.
	 */
	switch (fs->fs_postblformat) {

	case FS_42POSTBLFMT:
		/* Old static layout (struct ocg). */
		basesize = (char *)(&ocg->cg_btot[0]) -
		    (char *)(&ocg->cg_link);
		sumsize = &ocg->cg_iused[0] - (char *)(&ocg->cg_btot[0]);
		mapsize = &ocg->cg_free[howmany(fs->fs_fpg, NBBY)] -
		    (u_char *)&ocg->cg_iused[0];
		ocg->cg_magic = CG_MAGIC;
		/* old format always uses 8 rotational positions */
		savednrpos = fs->fs_nrpos;
		fs->fs_nrpos = 8;
		break;

	case FS_DYNAMICPOSTBLFMT:
		/* New layout: offsets are stored in the cg itself. */
		newcg->cg_btotoff = &newcg->cg_space[0] -
		    (u_char *)(&newcg->cg_link);
		newcg->cg_boff = newcg->cg_btotoff +
		    fs->fs_cpg * sizeof(long);
		newcg->cg_iusedoff = newcg->cg_boff +
		    fs->fs_cpg * fs->fs_nrpos * sizeof(short);
		newcg->cg_freeoff = newcg->cg_iusedoff +
		    howmany(fs->fs_ipg, NBBY);
		newcg->cg_nextfreeoff = newcg->cg_freeoff +
		    howmany(fs->fs_cpg * fs->fs_spc / NSPF(fs), NBBY);
		newcg->cg_magic = CG_MAGIC;
		basesize = &newcg->cg_space[0] -
		    (u_char *)(&newcg->cg_link);
		sumsize = newcg->cg_iusedoff - newcg->cg_btotoff;
		mapsize = newcg->cg_nextfreeoff - newcg->cg_iusedoff;
		break;

	default:
		errexit("UNKNOWN ROTATIONAL TABLE FORMAT %d\n",
		    fs->fs_postblformat);
	}
	bzero((char *)&idesc, sizeof(struct inodesc));
	idesc.id_type = ADDR;
	bzero((char *)&cstotal, sizeof(struct csum));
	(void)time(&now);
	/* Fragments past fs_size up to the fragment boundary are never free. */
	for (i = fs->fs_size; i < fragroundup(fs, fs->fs_size); i++)
		setbmap(i);
	for (c = 0; c < fs->fs_ncg; c++) {
		getblk(&cgblk, cgtod(fs, c), fs->fs_cgsize);
		if (!cg_chkmagic(cg))
			pfatal("CG %d: BAD MAGIC NUMBER\n", c);
		dbase = cgbase(fs, c);
		dmax = dbase + fs->fs_fpg;
		if (dmax > fs->fs_size)
			dmax = fs->fs_size;
		/* never let a cg timestamp sit in the future */
		if (now > cg->cg_time)
			newcg->cg_time = cg->cg_time;
		else
			newcg->cg_time = now;
		newcg->cg_cgx = c;
		if (c == fs->fs_ncg - 1)
			newcg->cg_ncyl = fs->fs_ncyl % fs->fs_cpg;
		else
			newcg->cg_ncyl = fs->fs_cpg;
		newcg->cg_ndblk = dmax - dbase;
		newcg->cg_cs.cs_ndir = 0;
		newcg->cg_cs.cs_nffree = 0;
		newcg->cg_cs.cs_nbfree = 0;
		newcg->cg_cs.cs_nifree = fs->fs_ipg;
		/* keep on-disk rotor hints only if they are in range */
		if (cg->cg_rotor < newcg->cg_ndblk)
			newcg->cg_rotor = cg->cg_rotor;
		else
			newcg->cg_rotor = 0;
		if (cg->cg_frotor < newcg->cg_ndblk)
			newcg->cg_frotor = cg->cg_frotor;
		else
			newcg->cg_frotor = 0;
		if (cg->cg_irotor < newcg->cg_niblk)
			newcg->cg_irotor = cg->cg_irotor;
		else
			newcg->cg_irotor = 0;
		bzero((char *)&newcg->cg_frsum[0], sizeof newcg->cg_frsum);
		bzero((char *)&cg_blktot(newcg)[0], sumsize + mapsize);
		if (fs->fs_postblformat == FS_42POSTBLFMT)
			ocg->cg_magic = CG_MAGIC;
		/*
		 * Rebuild the inode-used bitmap and directory/inode counts
		 * from the global statemap built in passes 1-4.
		 */
		j = fs->fs_ipg * c;
		for (i = 0; i < fs->fs_ipg; j++, i++) {
			switch (statemap[j]) {

			case USTATE:
				break;

			case DSTATE:
			case DCLEAR:
			case DFOUND:
				newcg->cg_cs.cs_ndir++;
				/* fall through */

			case FSTATE:
			case FCLEAR:
				newcg->cg_cs.cs_nifree--;
				setbit(cg_inosused(newcg), i);
				break;

			default:
				if (j < ROOTINO)
					break;
				errexit("BAD STATE %d FOR INODE I=%d",
				    statemap[j], j);
			}
		}
		/* the reserved inodes below ROOTINO are always "in use" */
		if (c == 0)
			for (i = 0; i < ROOTINO; i++) {
				setbit(cg_inosused(newcg), i);
				newcg->cg_cs.cs_nifree--;
			}
		/*
		 * Rebuild the free-fragment bitmap and the block/fragment
		 * summary counts, one full block (fs_frag fragments) at a
		 * time.  d is the absolute fragment, i is cg-relative.
		 */
		for (i = 0, d = dbase;
		     d < dmax;
		     d += fs->fs_frag, i += fs->fs_frag) {
			frags = 0;
			for (j = 0; j < fs->fs_frag; j++) {
				if (getbmap(d + j))
					continue;
				setbit(cg_blksfree(newcg), i + j);
				frags++;
			}
			if (frags == fs->fs_frag) {
				/* fully free block: count it and its position */
				newcg->cg_cs.cs_nbfree++;
				j = cbtocylno(fs, i);
				cg_blktot(newcg)[j]++;
				cg_blks(fs, newcg, j)[cbtorpos(fs, i)]++;
			} else if (frags > 0) {
				/* partially free: account individual fragments */
				newcg->cg_cs.cs_nffree += frags;
				blk = blkmap(fs, cg_blksfree(newcg), i);
				fragacct(fs, blk, newcg->cg_frsum, 1);
			}
		}
		cstotal.cs_nffree += newcg->cg_cs.cs_nffree;
		cstotal.cs_nbfree += newcg->cg_cs.cs_nbfree;
		cstotal.cs_nifree += newcg->cg_cs.cs_nifree;
		cstotal.cs_ndir += newcg->cg_cs.cs_ndir;
		/* compare/repair the per-cg summary stored in the superblock */
		cs = &fs->fs_cs(fs, c);
		if (bcmp((char *)&newcg->cg_cs, (char *)cs, sizeof *cs) != 0 &&
		    dofix(&idesc, "FREE BLK COUNT(S) WRONG IN SUPERBLK")) {
			bcopy((char *)&newcg->cg_cs, (char *)cs, sizeof *cs);
			sbdirty();
		}
		/* format conversion: rewrite the whole cg unconditionally */
		if (cvtflag) {
			bcopy((char *)newcg, (char *)cg, fs->fs_cgsize);
			cgdirty();
			continue;
		}
		if (bcmp(cg_inosused(newcg), cg_inosused(cg), mapsize) != 0 &&
		    dofix(&idesc, "BLK(S) MISSING IN BIT MAPS")) {
			bcopy(cg_inosused(newcg), cg_inosused(cg), mapsize);
			cgdirty();
		}
		if ((bcmp((char *)newcg, (char *)cg, basesize) != 0 ||
		     bcmp((char *)&cg_blktot(newcg)[0],
			  (char *)&cg_blktot(cg)[0], sumsize) != 0) &&
		    dofix(&idesc, "SUMMARY INFORMATION BAD")) {
			bcopy((char *)newcg, (char *)cg, basesize);
			bcopy((char *)&cg_blktot(newcg)[0],
			      (char *)&cg_blktot(cg)[0], sumsize);
			cgdirty();
		}
	}
	if (fs->fs_postblformat == FS_42POSTBLFMT)
		fs->fs_nrpos = savednrpos;
	/* finally, compare/repair the filesystem-wide totals */
	if (bcmp((char *)&cstotal, (char *)&fs->fs_cstotal, sizeof *cs) != 0 &&
	    dofix(&idesc, "FREE BLK COUNT(S) WRONG IN SUPERBLK")) {
		bcopy((char *)&cstotal, (char *)&fs->fs_cstotal, sizeof *cs);
		fs->fs_ronly = 0;
		fs->fs_fmod = 0;
		sbdirty();
	}
}
/* * Verify cylinder group's magic number and other parameters. If the * test fails, offer an option to rebuild the whole cylinder group. */ int check_cgmagic(int cg, struct cg *cgp) { /* * Extended cylinder group checks. */ if (cg_chkmagic(cgp) && ((sblock.fs_magic == FS_UFS1_MAGIC && cgp->cg_old_niblk == sblock.fs_ipg && cgp->cg_ndblk <= sblock.fs_fpg && cgp->cg_old_ncyl <= sblock.fs_old_cpg) || (sblock.fs_magic == FS_UFS2_MAGIC && cgp->cg_niblk == sblock.fs_ipg && cgp->cg_ndblk <= sblock.fs_fpg && cgp->cg_initediblk <= sblock.fs_ipg))) { return (1); } pfatal("CYLINDER GROUP %d: BAD MAGIC NUMBER", cg); if (!reply("REBUILD CYLINDER GROUP")) { printf("YOU WILL NEED TO RERUN FSCK.\n"); rerun = 1; return (1); } /* * Zero out the cylinder group and then initialize critical fields. * Bit maps and summaries will be recalculated by later passes. */ memset(cgp, 0, (size_t)sblock.fs_cgsize); cgp->cg_magic = CG_MAGIC; cgp->cg_cgx = cg; cgp->cg_niblk = sblock.fs_ipg; cgp->cg_initediblk = sblock.fs_ipg < 2 * INOPB(&sblock) ? 
sblock.fs_ipg : 2 * INOPB(&sblock); if (cgbase(&sblock, cg) + sblock.fs_fpg < sblock.fs_size) cgp->cg_ndblk = sblock.fs_fpg; else cgp->cg_ndblk = sblock.fs_size - cgbase(&sblock, cg); cgp->cg_iusedoff = &cgp->cg_space[0] - (u_char *)(&cgp->cg_firstfield); if (sblock.fs_magic == FS_UFS1_MAGIC) { cgp->cg_niblk = 0; cgp->cg_initediblk = 0; cgp->cg_old_ncyl = sblock.fs_old_cpg; cgp->cg_old_niblk = sblock.fs_ipg; cgp->cg_old_btotoff = cgp->cg_iusedoff; cgp->cg_old_boff = cgp->cg_old_btotoff + sblock.fs_old_cpg * sizeof(int32_t); cgp->cg_iusedoff = cgp->cg_old_boff + sblock.fs_old_cpg * sizeof(u_int16_t); } cgp->cg_freeoff = cgp->cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT); cgp->cg_nextfreeoff = cgp->cg_freeoff + howmany(sblock.fs_fpg,CHAR_BIT); if (sblock.fs_contigsumsize > 0) { cgp->cg_nclusterblks = cgp->cg_ndblk / sblock.fs_frag; cgp->cg_clustersumoff = roundup(cgp->cg_nextfreeoff, sizeof(u_int32_t)); cgp->cg_clustersumoff -= sizeof(u_int32_t); cgp->cg_clusteroff = cgp->cg_clustersumoff + (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t); cgp->cg_nextfreeoff = cgp->cg_clusteroff + howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT); } cgdirty(); return (0); }
void pass5(void) { int c, blk, frags, basesize, sumsize, mapsize, savednrpos = 0; int inomapsize, blkmapsize; struct fs *fs = &sblock; struct cg *cg = &cgrp; ufs_daddr_t dbase, dmax; ufs_daddr_t d; long i, j, k; struct csum *cs; struct csum cstotal; struct inodesc idesc[3]; char buf[MAXBSIZE]; struct cg *newcg = (struct cg *)buf; struct ocg *ocg = (struct ocg *)buf; inoinfo(WINO)->ino_state = USTATE; memset(newcg, 0, (size_t)fs->fs_cgsize); newcg->cg_niblk = fs->fs_ipg; if (cvtlevel >= 3) { if (fs->fs_maxcontig < 2 && fs->fs_contigsumsize > 0) { if (preen) pwarn("DELETING CLUSTERING MAPS\n"); if (preen || reply("DELETE CLUSTERING MAPS")) { fs->fs_contigsumsize = 0; doinglevel1 = 1; sbdirty(); } } if (fs->fs_maxcontig > 1) { char *doit = NULL; if (fs->fs_contigsumsize < 1) { doit = "CREAT"; } else if (fs->fs_contigsumsize < fs->fs_maxcontig && fs->fs_contigsumsize < FS_MAXCONTIG) { doit = "EXPAND"; } if (doit) { i = fs->fs_contigsumsize; fs->fs_contigsumsize = MIN(fs->fs_maxcontig, FS_MAXCONTIG); if (CGSIZE(fs) > fs->fs_bsize) { pwarn("CANNOT %s CLUSTER MAPS\n", doit); fs->fs_contigsumsize = i; } else if (preen || reply("CREATE CLUSTER MAPS")) { if (preen) pwarn("%sING CLUSTER MAPS\n", doit); fs->fs_cgsize = fragroundup(fs, CGSIZE(fs)); doinglevel1 = 1; sbdirty(); } } } } switch ((int)fs->fs_postblformat) { case FS_42POSTBLFMT: basesize = (char *)(&ocg->cg_btot[0]) - (char *)(&ocg->cg_firstfield); sumsize = &ocg->cg_iused[0] - (u_int8_t *)(&ocg->cg_btot[0]); mapsize = &ocg->cg_free[howmany(fs->fs_fpg, NBBY)] - (u_char *)&ocg->cg_iused[0]; blkmapsize = howmany(fs->fs_fpg, NBBY); inomapsize = &ocg->cg_free[0] - (u_char *)&ocg->cg_iused[0]; ocg->cg_magic = CG_MAGIC; savednrpos = fs->fs_nrpos; fs->fs_nrpos = 8; break; case FS_DYNAMICPOSTBLFMT: newcg->cg_btotoff = &newcg->cg_space[0] - (u_char *)(&newcg->cg_firstfield); newcg->cg_boff = newcg->cg_btotoff + fs->fs_cpg * sizeof(int32_t); newcg->cg_iusedoff = newcg->cg_boff + fs->fs_cpg * fs->fs_nrpos * sizeof(u_int16_t); 
newcg->cg_freeoff = newcg->cg_iusedoff + howmany(fs->fs_ipg, NBBY); inomapsize = newcg->cg_freeoff - newcg->cg_iusedoff; newcg->cg_nextfreeoff = newcg->cg_freeoff + howmany(fs->fs_cpg * fs->fs_spc / NSPF(fs), NBBY); blkmapsize = newcg->cg_nextfreeoff - newcg->cg_freeoff; if (fs->fs_contigsumsize > 0) { newcg->cg_clustersumoff = newcg->cg_nextfreeoff - sizeof(u_int32_t); newcg->cg_clustersumoff = roundup(newcg->cg_clustersumoff, sizeof(u_int32_t)); newcg->cg_clusteroff = newcg->cg_clustersumoff + (fs->fs_contigsumsize + 1) * sizeof(u_int32_t); newcg->cg_nextfreeoff = newcg->cg_clusteroff + howmany(fs->fs_cpg * fs->fs_spc / NSPB(fs), NBBY); } newcg->cg_magic = CG_MAGIC; basesize = &newcg->cg_space[0] - (u_char *)(&newcg->cg_firstfield); sumsize = newcg->cg_iusedoff - newcg->cg_btotoff; mapsize = newcg->cg_nextfreeoff - newcg->cg_iusedoff; break; default: inomapsize = blkmapsize = sumsize = 0; /* keep lint happy */ errx(EEXIT, "UNKNOWN ROTATIONAL TABLE FORMAT %d", fs->fs_postblformat); } memset(&idesc[0], 0, sizeof idesc); for (i = 0; i < 3; i++) { idesc[i].id_type = ADDR; if (doinglevel2) idesc[i].id_fix = FIX; } memset(&cstotal, 0, sizeof(struct csum)); j = blknum(fs, fs->fs_size + fs->fs_frag - 1); for (i = fs->fs_size; i < j; i++) setbmap(i); for (c = 0; c < fs->fs_ncg; c++) { if (got_siginfo) { printf("%s: phase 5: cyl group %d of %d (%d%%)\n", cdevname, c, sblock.fs_ncg, c * 100 / sblock.fs_ncg); got_siginfo = 0; } getblk(&cgblk, cgtod(fs, c), fs->fs_cgsize); if (!cg_chkmagic(cg)) pfatal("CG %d: BAD MAGIC NUMBER\n", c); dbase = cgbase(fs, c); dmax = dbase + fs->fs_fpg; if (dmax > fs->fs_size) dmax = fs->fs_size; newcg->cg_time = cg->cg_time; newcg->cg_cgx = c; if (c == fs->fs_ncg - 1) newcg->cg_ncyl = fs->fs_ncyl % fs->fs_cpg; else newcg->cg_ncyl = fs->fs_cpg; newcg->cg_ndblk = dmax - dbase; if (fs->fs_contigsumsize > 0) newcg->cg_nclusterblks = newcg->cg_ndblk / fs->fs_frag; newcg->cg_cs.cs_ndir = 0; newcg->cg_cs.cs_nffree = 0; newcg->cg_cs.cs_nbfree = 0; 
newcg->cg_cs.cs_nifree = fs->fs_ipg; if ((cg->cg_rotor >= 0) && (cg->cg_rotor < newcg->cg_ndblk)) newcg->cg_rotor = cg->cg_rotor; else newcg->cg_rotor = 0; if ((cg->cg_frotor >= 0) && (cg->cg_frotor < newcg->cg_ndblk)) newcg->cg_frotor = cg->cg_frotor; else newcg->cg_frotor = 0; if ((cg->cg_irotor >= 0) && (cg->cg_irotor < newcg->cg_niblk)) newcg->cg_irotor = cg->cg_irotor; else newcg->cg_irotor = 0; memset(&newcg->cg_frsum[0], 0, sizeof newcg->cg_frsum); memset(&cg_blktot(newcg)[0], 0, (size_t)(sumsize + mapsize)); if (fs->fs_postblformat == FS_42POSTBLFMT) ocg->cg_magic = CG_MAGIC; j = fs->fs_ipg * c; for (i = 0; i < inostathead[c].il_numalloced; j++, i++) { switch (inoinfo(j)->ino_state) { case USTATE: break; case DSTATE: case DCLEAR: case DFOUND: newcg->cg_cs.cs_ndir++; /* fall through */ case FSTATE: case FCLEAR: newcg->cg_cs.cs_nifree--; setbit(cg_inosused(newcg), i); break; default: if (j < ROOTINO) break; errx(EEXIT, "BAD STATE %d FOR INODE I=%ld", inoinfo(j)->ino_state, j); } } if (c == 0) for (i = 0; i < ROOTINO; i++) { setbit(cg_inosused(newcg), i); newcg->cg_cs.cs_nifree--; } for (i = 0, d = dbase; d < dmax; d += fs->fs_frag, i += fs->fs_frag) { frags = 0; for (j = 0; j < fs->fs_frag; j++) { if (testbmap(d + j)) continue; setbit(cg_blksfree(newcg), i + j); frags++; } if (frags == fs->fs_frag) { newcg->cg_cs.cs_nbfree++; j = cbtocylno(fs, i); cg_blktot(newcg)[j]++; cg_blks(fs, newcg, j)[cbtorpos(fs, i)]++; if (fs->fs_contigsumsize > 0) setbit(cg_clustersfree(newcg), i / fs->fs_frag); } else if (frags > 0) { newcg->cg_cs.cs_nffree += frags; blk = blkmap(fs, cg_blksfree(newcg), i); ffs_fragacct(fs, blk, newcg->cg_frsum, 1); } } if (fs->fs_contigsumsize > 0) { int32_t *sump = cg_clustersum(newcg); u_char *mapp = cg_clustersfree(newcg); int map = *mapp++; int bit = 1; int run = 0; for (i = 0; i < newcg->cg_nclusterblks; i++) { if ((map & bit) != 0) { run++; } else if (run != 0) { if (run > fs->fs_contigsumsize) run = fs->fs_contigsumsize; sump[run]++; run = 
0; } if ((i & (NBBY - 1)) != (NBBY - 1)) { bit <<= 1; } else { map = *mapp++; bit = 1; } } if (run != 0) { if (run > fs->fs_contigsumsize) run = fs->fs_contigsumsize; sump[run]++; } } cstotal.cs_nffree += newcg->cg_cs.cs_nffree; cstotal.cs_nbfree += newcg->cg_cs.cs_nbfree; cstotal.cs_nifree += newcg->cg_cs.cs_nifree; cstotal.cs_ndir += newcg->cg_cs.cs_ndir; cs = &fs->fs_cs(fs, c); if (memcmp(&newcg->cg_cs, cs, sizeof *cs) != 0 && dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) { memmove(cs, &newcg->cg_cs, sizeof *cs); sbdirty(); } if (doinglevel1) { memmove(cg, newcg, (size_t)fs->fs_cgsize); cgdirty(); continue; } if ((memcmp(newcg, cg, basesize) != 0 || memcmp(&cg_blktot(newcg)[0], &cg_blktot(cg)[0], sumsize) != 0) && dofix(&idesc[2], "SUMMARY INFORMATION BAD")) { memmove(cg, newcg, (size_t)basesize); memmove(&cg_blktot(cg)[0], &cg_blktot(newcg)[0], (size_t)sumsize); cgdirty(); } if (usedsoftdep) { for (i = 0; i < inomapsize; i++) { j = cg_inosused(newcg)[i]; if ((cg_inosused(cg)[i] & j) == j) continue; for (k = 0; k < NBBY; k++) { if ((j & (1 << k)) == 0) continue; if (cg_inosused(cg)[i] & (1 << k)) continue; pwarn("ALLOCATED INODE %d MARKED FREE\n", c * fs->fs_ipg + i * NBBY + k); } } for (i = 0; i < blkmapsize; i++) { j = cg_blksfree(cg)[i]; if ((cg_blksfree(newcg)[i] & j) == j) continue; for (k = 0; k < NBBY; k++) { if ((j & (1 << k)) == 0) continue; if (cg_blksfree(newcg)[i] & (1 << k)) continue; pwarn("ALLOCATED FRAG %d MARKED FREE\n", c * fs->fs_fpg + i * NBBY + k); } } } if (memcmp(cg_inosused(newcg), cg_inosused(cg), mapsize) != 0 && dofix(&idesc[1], "BLK(S) MISSING IN BIT MAPS")) { memmove(cg_inosused(cg), cg_inosused(newcg), (size_t)mapsize); cgdirty(); } } if (fs->fs_postblformat == FS_42POSTBLFMT) fs->fs_nrpos = savednrpos; if (memcmp(&cstotal, &fs->fs_cstotal, sizeof *cs) != 0 && dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) { memmove(&fs->fs_cstotal, &cstotal, sizeof *cs); fs->fs_ronly = 0; fs->fs_fmod = 0; sbdirty(); } }
void pass5(void) { int c, blk, frags, basesize, sumsize, mapsize, cssize; int inomapsize, blkmapsize; struct fs *fs = sblock; daddr_t dbase, dmax; daddr_t d; long i, j, k; struct csum *cs; struct csum_total cstotal; struct inodesc idesc[4]; char buf[MAXBSIZE]; struct cg *newcg = (struct cg *)buf; struct ocg *ocg = (struct ocg *)buf; struct cg *cg = cgrp, *ncg; struct inostat *info; u_int32_t ncgsize; inoinfo(WINO)->ino_state = USTATE; memset(newcg, 0, (size_t)fs->fs_cgsize); newcg->cg_niblk = fs->fs_ipg; if (cvtlevel >= 3) { if (fs->fs_maxcontig < 2 && fs->fs_contigsumsize > 0) { if (preen) pwarn("DELETING CLUSTERING MAPS\n"); if (preen || reply("DELETE CLUSTERING MAPS")) { fs->fs_contigsumsize = 0; doinglevel1 = 1; sbdirty(); } } if (fs->fs_maxcontig > 1) { const char *doit = NULL; if (fs->fs_contigsumsize < 1) { doit = "CREAT"; } else if (fs->fs_contigsumsize < fs->fs_maxcontig && fs->fs_contigsumsize < FS_MAXCONTIG) { doit = "EXPAND"; } if (doit) { i = fs->fs_contigsumsize; fs->fs_contigsumsize = MIN(fs->fs_maxcontig, FS_MAXCONTIG); if (CGSIZE(fs) > fs->fs_bsize) { pwarn("CANNOT %s CLUSTER MAPS\n", doit); fs->fs_contigsumsize = i; } else if (preen || reply("CREATE CLUSTER MAPS")) { if (preen) pwarn("%sING CLUSTER MAPS\n", doit); ncgsize = fragroundup(fs, CGSIZE(fs)); ncg = realloc(cgrp, ncgsize); if (ncg == NULL) errexit( "cannot reallocate cg space"); cg = cgrp = ncg; fs->fs_cgsize = ncgsize; doinglevel1 = 1; sbdirty(); } } } } basesize = &newcg->cg_space[0] - (u_char *)(&newcg->cg_firstfield); cssize = (u_char *)&cstotal.cs_spare[0] - (u_char *)&cstotal.cs_ndir; sumsize = 0; if (is_ufs2) { newcg->cg_iusedoff = basesize; } else { /* * We reserve the space for the old rotation summary * tables for the benefit of old kernels, but do not * maintain them in modern kernels. In time, they can * go away. 
*/ newcg->cg_old_btotoff = basesize; newcg->cg_old_boff = newcg->cg_old_btotoff + fs->fs_old_cpg * sizeof(int32_t); newcg->cg_iusedoff = newcg->cg_old_boff + fs->fs_old_cpg * fs->fs_old_nrpos * sizeof(u_int16_t); memset(&newcg->cg_space[0], 0, newcg->cg_iusedoff - basesize); } inomapsize = howmany(fs->fs_ipg, CHAR_BIT); newcg->cg_freeoff = newcg->cg_iusedoff + inomapsize; blkmapsize = howmany(fs->fs_fpg, CHAR_BIT); newcg->cg_nextfreeoff = newcg->cg_freeoff + blkmapsize; if (fs->fs_contigsumsize > 0) { newcg->cg_clustersumoff = newcg->cg_nextfreeoff - sizeof(u_int32_t); if (isappleufs) { /* Apple PR2216969 gives rationale for this change. * I believe they were mistaken, but we need to * duplicate it for compatibility. -- [email protected] */ newcg->cg_clustersumoff += sizeof(u_int32_t); } newcg->cg_clustersumoff = roundup(newcg->cg_clustersumoff, sizeof(u_int32_t)); newcg->cg_clusteroff = newcg->cg_clustersumoff + (fs->fs_contigsumsize + 1) * sizeof(u_int32_t); newcg->cg_nextfreeoff = newcg->cg_clusteroff + howmany(fragstoblks(fs, fs->fs_fpg), CHAR_BIT); } newcg->cg_magic = CG_MAGIC; mapsize = newcg->cg_nextfreeoff - newcg->cg_iusedoff; if (!is_ufs2 && ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0)) { switch ((int)fs->fs_old_postblformat) { case FS_42POSTBLFMT: basesize = (char *)(&ocg->cg_btot[0]) - (char *)(&ocg->cg_firstfield); sumsize = &ocg->cg_iused[0] - (u_int8_t *)(&ocg->cg_btot[0]); mapsize = &ocg->cg_free[howmany(fs->fs_fpg, NBBY)] - (u_char *)&ocg->cg_iused[0]; blkmapsize = howmany(fs->fs_fpg, NBBY); inomapsize = &ocg->cg_free[0] - (u_char *)&ocg->cg_iused[0]; ocg->cg_magic = CG_MAGIC; newcg->cg_magic = 0; break; case FS_DYNAMICPOSTBLFMT: sumsize = newcg->cg_iusedoff - newcg->cg_old_btotoff; break; default: errexit("UNKNOWN ROTATIONAL TABLE FORMAT %d", fs->fs_old_postblformat); } } memset(&idesc[0], 0, sizeof idesc); for (i = 0; i < 4; i++) { idesc[i].id_type = ADDR; if (!is_ufs2 && doinglevel2) idesc[i].id_fix = FIX; } memset(&cstotal, 0, sizeof(struct 
csum_total)); dmax = blknum(fs, fs->fs_size + fs->fs_frag - 1); for (d = fs->fs_size; d < dmax; d++) setbmap(d); for (c = 0; c < fs->fs_ncg; c++) { if (got_siginfo) { fprintf(stderr, "%s: phase 5: cyl group %d of %d (%d%%)\n", cdevname(), c, fs->fs_ncg, c * 100 / fs->fs_ncg); got_siginfo = 0; } #ifdef PROGRESS progress_bar(cdevname(), preen ? NULL : "phase 5", c, fs->fs_ncg); #endif /* PROGRESS */ getblk(&cgblk, cgtod(fs, c), fs->fs_cgsize); memcpy(cg, cgblk.b_un.b_cg, fs->fs_cgsize); if((doswap && !needswap) || (!doswap && needswap)) ffs_cg_swap(cgblk.b_un.b_cg, cg, sblock); if (!doinglevel1 && !cg_chkmagic(cg, 0)) pfatal("CG %d: PASS5: BAD MAGIC NUMBER\n", c); if(doswap) cgdirty(); /* * While we have the disk head where we want it, * write back the superblock to the spare at this * cylinder group. */ if ((cvtlevel && sblk.b_dirty) || doswap) { bwrite(fswritefd, sblk.b_un.b_buf, fsbtodb(sblock, cgsblock(sblock, c)), sblock->fs_sbsize); } else { /* * Read in the current alternate superblock, * and compare it to the master. If it's * wrong, fix it up. */ getblk(&asblk, cgsblock(sblock, c), sblock->fs_sbsize); if (asblk.b_errs) pfatal("CG %d: UNABLE TO READ ALTERNATE " "SUPERBLK\n", c); else { memmove(altsblock, asblk.b_un.b_fs, sblock->fs_sbsize); if (needswap) ffs_sb_swap(asblk.b_un.b_fs, altsblock); } sb_oldfscompat_write(sblock, sblocksave); if ((asblk.b_errs || cmpsblks(sblock, altsblock)) && dofix(&idesc[3], "ALTERNATE SUPERBLK(S) ARE INCORRECT")) { bwrite(fswritefd, sblk.b_un.b_buf, fsbtodb(sblock, cgsblock(sblock, c)), sblock->fs_sbsize); } sb_oldfscompat_read(sblock, 0); } dbase = cgbase(fs, c); dmax = dbase + fs->fs_fpg; if (dmax > fs->fs_size) dmax = fs->fs_size; if (is_ufs2 || (fs->fs_old_flags & FS_FLAGS_UPDATED)) newcg->cg_time = cg->cg_time; newcg->cg_old_time = cg->cg_old_time; newcg->cg_cgx = c; newcg->cg_ndblk = dmax - dbase; if (!is_ufs2) { if (c == fs->fs_ncg - 1) { /* Avoid fighting old fsck for this value. 
Its never used * outside of this check anyway. */ if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) newcg->cg_old_ncyl = fs->fs_old_ncyl % fs->fs_old_cpg; else newcg->cg_old_ncyl = howmany(newcg->cg_ndblk, fs->fs_fpg / fs->fs_old_cpg); } else newcg->cg_old_ncyl = fs->fs_old_cpg; newcg->cg_old_niblk = fs->fs_ipg; newcg->cg_niblk = 0; } if (fs->fs_contigsumsize > 0) newcg->cg_nclusterblks = newcg->cg_ndblk / fs->fs_frag; newcg->cg_cs.cs_ndir = 0; newcg->cg_cs.cs_nffree = 0; newcg->cg_cs.cs_nbfree = 0; newcg->cg_cs.cs_nifree = fs->fs_ipg; if (cg->cg_rotor >= 0 && cg->cg_rotor < newcg->cg_ndblk) newcg->cg_rotor = cg->cg_rotor; else newcg->cg_rotor = 0; if (cg->cg_frotor >= 0 && cg->cg_frotor < newcg->cg_ndblk) newcg->cg_frotor = cg->cg_frotor; else newcg->cg_frotor = 0; if (cg->cg_irotor >= 0 && cg->cg_irotor < fs->fs_ipg) newcg->cg_irotor = cg->cg_irotor; else newcg->cg_irotor = 0; if (!is_ufs2) { newcg->cg_initediblk = 0; } else { if ((unsigned)cg->cg_initediblk > fs->fs_ipg) newcg->cg_initediblk = fs->fs_ipg; else newcg->cg_initediblk = cg->cg_initediblk; } memset(&newcg->cg_frsum[0], 0, sizeof newcg->cg_frsum); memset(&old_cg_blktot(newcg, 0)[0], 0, (size_t)(sumsize)); memset(cg_inosused(newcg, 0), 0, (size_t)(mapsize)); if (!is_ufs2 && ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) && fs->fs_old_postblformat == FS_42POSTBLFMT) ocg->cg_magic = CG_MAGIC; j = fs->fs_ipg * c; for (i = 0; i < fs->fs_ipg; j++, i++) { info = inoinfo(j); switch (info->ino_state) { case USTATE: break; case DSTATE: case DCLEAR: case DFOUND: newcg->cg_cs.cs_ndir++; /* fall through */ case FSTATE: case FCLEAR: newcg->cg_cs.cs_nifree--; setbit(cg_inosused(newcg, 0), i); break; default: if (j < ROOTINO) break; errexit("BAD STATE %d FOR INODE I=%ld", info->ino_state, (long)j); } } if (c == 0) for (i = 0; i < ROOTINO; i++) { setbit(cg_inosused(newcg, 0), i); newcg->cg_cs.cs_nifree--; } for (i = 0, d = dbase; d < dmax; d += fs->fs_frag, i += fs->fs_frag) { frags = 0; for (j = 0; j < fs->fs_frag; j++) 
{ if (testbmap(d + j)) continue; setbit(cg_blksfree(newcg, 0), i + j); frags++; } if (frags == fs->fs_frag) { newcg->cg_cs.cs_nbfree++; if (sumsize) { j = old_cbtocylno(fs, i); old_cg_blktot(newcg, 0)[j]++; old_cg_blks(fs, newcg, j, 0)[old_cbtorpos(fs, i)]++; } if (fs->fs_contigsumsize > 0) setbit(cg_clustersfree(newcg, 0), fragstoblks(fs, i)); } else if (frags > 0) { newcg->cg_cs.cs_nffree += frags; blk = blkmap(fs, cg_blksfree(newcg, 0), i); ffs_fragacct(fs, blk, newcg->cg_frsum, 1, 0); } } if (fs->fs_contigsumsize > 0) { int32_t *sump = cg_clustersum(newcg, 0); u_char *mapp = cg_clustersfree(newcg, 0); int map = *mapp++; int bit = 1; int run = 0; for (i = 0; i < newcg->cg_nclusterblks; i++) { if ((map & bit) != 0) { run++; } else if (run != 0) { if (run > fs->fs_contigsumsize) run = fs->fs_contigsumsize; sump[run]++; run = 0; } if ((i & (NBBY - 1)) != (NBBY - 1)) { bit <<= 1; } else { map = *mapp++; bit = 1; } } if (run != 0) { if (run > fs->fs_contigsumsize) run = fs->fs_contigsumsize; sump[run]++; } } cstotal.cs_nffree += newcg->cg_cs.cs_nffree; cstotal.cs_nbfree += newcg->cg_cs.cs_nbfree; cstotal.cs_nifree += newcg->cg_cs.cs_nifree; cstotal.cs_ndir += newcg->cg_cs.cs_ndir; cs = &fs->fs_cs(fs, c); if (memcmp(&newcg->cg_cs, cs, sizeof *cs) != 0) { if (debug) { printf("cg %d: nffree: %d/%d nbfree %d/%d" " nifree %d/%d ndir %d/%d\n", c, cs->cs_nffree,newcg->cg_cs.cs_nffree, cs->cs_nbfree,newcg->cg_cs.cs_nbfree, cs->cs_nifree,newcg->cg_cs.cs_nifree, cs->cs_ndir,newcg->cg_cs.cs_ndir); } if (dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) { memmove(cs, &newcg->cg_cs, sizeof *cs); sbdirty(); } else markclean = 0; } if (doinglevel1) { memmove(cg, newcg, (size_t)fs->fs_cgsize); cgdirty(); continue; } if ((memcmp(newcg, cg, basesize) != 0) || (memcmp(&old_cg_blktot(newcg, 0)[0], &old_cg_blktot(cg, 0)[0], sumsize) != 0)) { if (dofix(&idesc[2], "SUMMARY INFORMATION BAD")) { memmove(cg, newcg, (size_t)basesize); memmove(&old_cg_blktot(cg, 0)[0], 
&old_cg_blktot(newcg, 0)[0], (size_t)sumsize); cgdirty(); } else markclean = 0; } if (usedsoftdep) { for (i = 0; i < inomapsize; i++) { j = cg_inosused(newcg, 0)[i]; if ((cg_inosused(cg, 0)[i] & j) == j) continue; for (k = 0; k < NBBY; k++) { if ((j & (1 << k)) == 0) continue; if (cg_inosused(cg, 0)[i] & (1 << k)) continue; pwarn("ALLOCATED INODE %ld " "MARKED FREE\n", c * fs->fs_ipg + i * 8 + k); } } for (i = 0; i < blkmapsize; i++) { j = cg_blksfree(cg, 0)[i]; if ((cg_blksfree(newcg, 0)[i] & j) == j) continue; for (k = 0; k < NBBY; k++) { if ((j & (1 << k)) == 0) continue; if (cg_inosused(cg, 0)[i] & (1 << k)) continue; pwarn("ALLOCATED FRAG %ld " "MARKED FREE\n", c * fs->fs_fpg + i * 8 + k); } } } if (memcmp(cg_inosused(newcg, 0), cg_inosused(cg, 0), mapsize) != 0 && dofix(&idesc[1], "BLK(S) MISSING IN BIT MAPS")) { memmove(cg_inosused(cg, 0), cg_inosused(newcg, 0), (size_t)mapsize); cgdirty(); } } if (memcmp(&cstotal, &fs->fs_cstotal, cssize) != 0) { if (debug) { printf("total: nffree: %lld/%lld nbfree %lld/%lld" " nifree %lld/%lld ndir %lld/%lld\n", (long long int)fs->fs_cstotal.cs_nffree, (long long int)cstotal.cs_nffree, (long long int)fs->fs_cstotal.cs_nbfree, (long long int)cstotal.cs_nbfree, (long long int)fs->fs_cstotal.cs_nifree, (long long int)cstotal.cs_nifree, (long long int)fs->fs_cstotal.cs_ndir, (long long int)cstotal.cs_ndir); } if (dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) { memmove(&fs->fs_cstotal, &cstotal, sizeof cstotal); fs->fs_ronly = 0; fs->fs_fmod = 0; sbdirty(); } else markclean = 0; } #ifdef PROGRESS if (!preen) progress_done(); #endif /* PROGRESS */ }
/*
 * Check a single file system.
 *
 * Drives the entire fsck run for "filesys": probes for the clean/gjournal
 * fast paths, optionally replays the soft-updates journal (SUJ), then runs
 * phases 1 through 5, prints summary statistics, and releases per-cylinder-
 * group state.  Operates almost entirely on the check_* module globals
 * (check_sblk, check_preen, check_clean, ...) set up by the caller.
 *
 * Returns 0; several early paths exit() the process instead of returning.
 */
/* ARGSUSED */
static int
checkfilesys(char *filesys)
{
	ufs2_daddr_t n_ffree, n_bfree;
	struct dups *dp;
	int cylno;
	intmax_t blks, files;

	check_filename = filesys;
	if (check_debug && check_clean)
		check_warn("starting\n");
	check_sblock_init();
	if (check_clean && check_skipclean) {
		/*
		 * If file system is gjournaled, check it here.
		 * Read the superblock directly (read-only) to inspect the
		 * flags before committing to a full check_setup().
		 */
		if ((check_fsreadfd = open(filesys, O_RDONLY)) < 0 ||
		    check_readsb(0) == 0)
			exit(3);	/* Cannot read superblock */
		close(check_fsreadfd);
		if ((check_sblk.b_un.b_fs->fs_flags & FS_GJOURNAL) != 0) {
			//printf("GJournaled file system detected on %s.\n",
			//    filesys);
			if (check_sblk.b_un.b_fs->fs_clean == 1) {
				check_warn("FILE SYSTEM CLEAN; SKIPPING CHECKS\n");
				exit(0);
			}
			if ((check_sblk.b_un.b_fs->fs_flags &
			    (FS_UNCLEAN | FS_NEEDSFSCK)) == 0) {
				/* Journal replay alone can repair it. */
				check_gjournal(filesys);
				exit(0);
			} else {
				check_fatal(
			    "UNEXPECTED INCONSISTENCY, CANNOT RUN FAST FSCK\n");
			}
		}
	}
	/*
	 * check_setup() returns 0 on failure, -1 for a clean file system
	 * (report statistics and quit), anything else means: run the phases.
	 */
	switch (check_setup(filesys, 0)) {
	case 0:
		if (check_preen)
			check_fatal("CAN'T CHECK FILE SYSTEM.");
		return (0);
	case -1:
		check_warn("clean, %ld free ",
		    (long)(check_sblk.b_un.b_fs->fs_cstotal.cs_nffree +
		    check_sblk.b_un.b_fs->fs_frag *
		    check_sblk.b_un.b_fs->fs_cstotal.cs_nbfree));
		printf("(%jd frags, %jd blocks, %.1f%% fragmentation)\n",
		    (intmax_t)check_sblk.b_un.b_fs->fs_cstotal.cs_nffree,
		    (intmax_t)check_sblk.b_un.b_fs->fs_cstotal.cs_nbfree,
		    check_sblk.b_un.b_fs->fs_cstotal.cs_nffree * 100.0 /
		    check_sblk.b_un.b_fs->fs_dsize);
		return (0);
	}
	/*
	 * Determine if we can and should do journal recovery.
	 */
	if ((check_sblk.b_un.b_fs->fs_flags & FS_SUJ) == FS_SUJ) {
		if ((check_sblk.b_un.b_fs->fs_flags & FS_NEEDSFSCK) !=
		    FS_NEEDSFSCK && check_skipclean) {
			if (check_preen || check_reply("USE JOURNAL")) {
				if (check_suj(filesys) == 0) {
					printf("\n***** FILE SYSTEM MARKED CLEAN *****\n");
					exit(0);
				}
			}
			printf("** Skipping journal, falling through to full fsck\n\n");
		}
		/*
		 * Write the superblock so we don't try to recover the
		 * journal on another pass.
		 */
		check_sblk.b_un.b_fs->fs_mtime = time(NULL);
		dirty(&check_sblk);
	}

	/*
	 * Cleared if any questions answered no. Used to decide if
	 * the superblock should be marked clean.
	 */
	check_resolved = 1;
	/*
	 * 1: scan inodes tallying blocks used
	 */
	if (check_preen == 0) {
		printf("** Last Mounted on %s\n",
		    check_sblk.b_un.b_fs->fs_fsmnt);
		printf("** Phase 1 - Check Blocks and Sizes\n");
	}
	check_pass1();

	/*
	 * 1b: locate first references to duplicates, if any
	 */
	if (check_duplist) {
		/* Duplicate resolution is interactive; incompatible with
		 * preen mode and with soft-updates' ordering guarantees. */
		if (check_preen || check_usedsoftdep)
			check_fatal("INTERNAL ERROR: dups with %s%s%s",
			    check_preen ? "-p" : "",
			    (check_preen && check_usedsoftdep) ? " and " : "",
			    check_usedsoftdep ? "softupdates" : "");
		printf("** Phase 1b - Rescan For More DUPS\n");
		check_pass1b();
	}

	/*
	 * 2: traverse directories from root to mark all connected directories
	 */
	if (check_preen == 0)
		printf("** Phase 2 - Check Pathnames\n");
	check_pass2();

	/*
	 * 3: scan inodes looking for disconnected directories
	 */
	if (check_preen == 0)
		printf("** Phase 3 - Check Connectivity\n");
	check_pass3();

	/*
	 * 4: scan inodes looking for disconnected files; check reference counts
	 */
	if (check_preen == 0)
		printf("** Phase 4 - Check Reference Counts\n");
	check_pass4();

	/*
	 * 5: check and repair resource counts in cylinder groups
	 */
	if (check_preen == 0)
		printf("** Phase 5 - Check Cyl groups\n");
	check_pass5();

	/*
	 * print out summary statistics
	 *
	 * "files"/"blks" compute the discrepancy between what the phases
	 * counted (check_n_files/check_n_blks) and what the superblock
	 * totals imply; a negative result is reported below with -debug.
	 */
	n_ffree = check_sblk.b_un.b_fs->fs_cstotal.cs_nffree;
	n_bfree = check_sblk.b_un.b_fs->fs_cstotal.cs_nbfree;
	files = check_maxino - ROOTINO -
	    check_sblk.b_un.b_fs->fs_cstotal.cs_nifree - check_n_files;
	/* Account for per-cg metadata (superblock copy through first data
	 * block), boot area, and the csum summary area. */
	blks = check_n_blks + check_sblk.b_un.b_fs->fs_ncg *
	    (cgdmin(check_sblk.b_un.b_fs, 0) -
	    cgsblock(check_sblk.b_un.b_fs, 0));
	blks += cgsblock(check_sblk.b_un.b_fs, 0) -
	    cgbase(check_sblk.b_un.b_fs, 0);
	blks += howmany(check_sblk.b_un.b_fs->fs_cssize,
	    check_sblk.b_un.b_fs->fs_fsize);
	blks = check_maxfsblock -
	    (n_ffree + check_sblk.b_un.b_fs->fs_frag * n_bfree) - blks;
	check_warn("%ld files, %jd used, %ju free ",
	    (long)check_n_files, (intmax_t)check_n_blks,
	    (uintmax_t)(n_ffree + check_sblk.b_un.b_fs->fs_frag * n_bfree));
	printf("(%ju frags, %ju blocks, %.1f%% fragmentation)\n",
	    (uintmax_t)n_ffree, (uintmax_t)n_bfree,
	    n_ffree * 100.0 / check_sblk.b_un.b_fs->fs_dsize);
	if (check_debug) {
		if (files < 0)
			printf("%jd inodes missing\n", -files);
		if (blks < 0)
			printf("%jd blocks missing\n", -blks);
		if (check_duplist != NULL) {
			printf("The following duplicate blocks remain:");
			for (dp = check_duplist; dp; dp = dp->next)
				printf(" %jd,", (intmax_t)dp->dup);
			printf("\n");
		}
	}
	/* NOTE(review): the dup/muldup lists are dropped without freeing
	 * their nodes here — presumably reclaimed elsewhere or the process
	 * exits soon after; verify against the rest of the program. */
	check_duplist = (struct dups *)0;
	check_muldup = (struct dups *)0;
	check_inocleanup();
	if (check_fsmodified) {
		check_sblk.b_un.b_fs->fs_time = time(NULL);
		dirty(&check_sblk);
	}
	if (check_cvtlevel && check_sblk.b_dirty) {
		/*
		 * Write out the duplicate super blocks
		 */
		for (cylno = 0; cylno < check_sblk.b_un.b_fs->fs_ncg; cylno++)
			check_blwrite(check_fswritefd,
			    (char *)check_sblk.b_un.b_fs,
			    fsbtodb(check_sblk.b_un.b_fs,
			    cgsblock(check_sblk.b_un.b_fs, cylno)), SBLOCKSIZE);
	}
	if (check_rerun)
		check_resolved = 0;

	/*
	 * Check to see if the file system is mounted read-write.
	 * check_finish() also marks the superblock clean when all
	 * questions were answered yes (check_resolved).
	 */
	check_finish(check_resolved);
	/* Release per-cylinder-group inode state tables. */
	for (cylno = 0; cylno < check_sblk.b_un.b_fs->fs_ncg; cylno++)
		if (check_inostathead[cylno].il_stat != NULL)
			free((char *)check_inostathead[cylno].il_stat);
	free((char *)check_inostathead);
	check_inostathead = NULL;
	if (check_fsmodified && !check_preen)
		printf("\n***** FILE SYSTEM WAS MODIFIED *****\n");
	if (check_rerun)
		printf("\n***** PLEASE RERUN FSCK *****\n");
	return (0);
}
/*
 * Phase 1: check blocks and sizes.
 *
 * First marks all file-system metadata (boot/superblock area through the
 * first data block of each cylinder group, plus the csum summary in cg 0)
 * as used in the block map.  Then walks every inode in every cylinder
 * group: unallocated inodes are verified empty (offering to clear partially
 * allocated ones), allocated inodes have their size, direct/indirect block
 * pointers, and type validated, their link count recorded in lncntp[], and
 * their blocks claimed via ckinode()/pass1check into the global block map.
 * Inodes with a zero or negative link count are queued on zlnhead for
 * phase 4.  Block counts (di_blocks) are cross-checked and optionally
 * corrected.
 *
 * Side effects: statemap[], lncntp[], lastino, n_files, n_blks, zlnhead,
 * and the global used-block bitmap (setbmap).
 */
pass1()
{
	register int c, i, j;
	register DINODE *dp;
	struct zlncnt *zlnp;
	int ndb, partial, cgd;	/* NOTE(review): "partial" is unused here */
	struct inodesc idesc;
	ino_t inumber;

	/*
	 * Set file system reserved blocks in used block map.
	 */
	for (c = 0; c < sblock.fs_ncg; c++) {
		cgd = cgdmin(&sblock, c);
		if (c == 0) {
			/* cg 0 also holds the cylinder summary area. */
			i = cgbase(&sblock, c);
			cgd += howmany(sblock.fs_cssize, sblock.fs_fsize);
		} else
			i = cgsblock(&sblock, c);
		for (; i < cgd; i++)
			setbmap(i);
	}
	/*
	 * Find all allocated blocks.
	 */
	bzero((char *)&idesc, sizeof(struct inodesc));
	idesc.id_type = ADDR;
	idesc.id_func = pass1check;
	inumber = 0;
	n_files = n_blks = 0;
	for (c = 0; c < sblock.fs_ncg; c++) {
		for (i = 0; i < sblock.fs_ipg; i++, inumber++) {
			/* Inodes below the root are reserved; skip. */
			if (inumber < ROOTINO)
				continue;
			dp = ginode(inumber);
			if (!ALLOC(dp)) {
				/*
				 * A free inode must be entirely zero;
				 * anything else is a partial allocation
				 * left by a crash — offer to clear it.
				 */
				if (bcmp((char *)dp->di_db, (char *)zino.di_db,
					NDADDR * sizeof(daddr_t)) ||
				    bcmp((char *)dp->di_ib, (char *)zino.di_ib,
					NIADDR * sizeof(daddr_t)) ||
				    dp->di_mode || dp->di_size) {
					pfatal("PARTIALLY ALLOCATED INODE I=%u",
					    inumber);
					if (reply("CLEAR") == 1) {
						zapino(dp);
						inodirty();
					}
				}
				statemap[inumber] = USTATE;
				continue;
			}
			lastino = inumber;
			/* Reject sizes that are negative or overflow when
			 * rounded up to a block boundary. */
			if (dp->di_size < 0 ||
			    dp->di_size + sblock.fs_bsize - 1 < 0) {
				if (debug)
					printf("bad size %d:", dp->di_size);
				goto unknown;
			}
			/* Mode of all ones marks a "hold bad block" file;
			 * optionally convert it to a plain regular file. */
			if (!preen && (dp->di_mode & IFMT) == IFMT &&
			    reply("HOLD BAD BLOCK") == 1) {
				dp->di_size = sblock.fs_fsize;
				dp->di_mode = IFREG|0600;
				inodirty();
			}
			ndb = howmany(dp->di_size, sblock.fs_bsize);
			if (SPECIAL(dp))
				ndb++;
			/* Direct pointers past the file size must be zero. */
			for (j = ndb; j < NDADDR; j++)
				if (dp->di_db[j] != 0) {
					if (debug)
						printf("bad direct addr: %d\n",
						    dp->di_db[j]);
					goto unknown;
				}
			/* Same for indirect pointers past what the size
			 * requires: compute how many levels are in use. */
			for (j = 0, ndb -= NDADDR; ndb > 0; j++)
				ndb /= NINDIR(&sblock);
			for (; j < NIADDR; j++)
				if (dp->di_ib[j] != 0) {
					if (debug)
						printf("bad indirect addr: %d\n",
						    dp->di_ib[j]);
					goto unknown;
				}
			if (ftypeok(dp) == 0)
				goto unknown;
			n_files++;
			lncntp[inumber] = dp->di_nlink;
			if (dp->di_nlink <= 0) {
				/* Remember zero-link inodes for phase 4. */
				zlnp = (struct zlncnt *)malloc(sizeof *zlnp);
				if (zlnp == NULL) {
					pfatal("LINK COUNT TABLE OVERFLOW");
					if (reply("CONTINUE") == 0)
						errexit("");
				} else {
					zlnp->zlncnt = inumber;
					zlnp->next = zlnhead;
					zlnhead = zlnp;
				}
			}
			statemap[inumber] = DIRCT(dp) ? DSTATE : FSTATE;
			badblk = dupblk = 0;
			maxblk = 0;
			/* Walk the inode's blocks; pass1check tallies them
			 * in id_entryno (in fragments, converted below). */
			idesc.id_number = inumber;
			(void)ckinode(dp, &idesc);
			idesc.id_entryno *= btodb(sblock.fs_fsize);
			if (dp->di_blocks != idesc.id_entryno) {
				pwarn("INCORRECT BLOCK COUNT I=%u (%ld should be %ld)",
				    inumber, dp->di_blocks, idesc.id_entryno);
				if (preen)
					printf(" (CORRECTED)\n");
				else if (reply("CORRECT") == 0)
					continue;
				dp->di_blocks = idesc.id_entryno;
				inodirty();
			}
			continue;
	unknown:
			/* Inode failed validation: mark for clearing. */
			pfatal("UNKNOWN FILE TYPE I=%u", inumber);
			statemap[inumber] = FCLEAR;
			if (reply("CLEAR") == 1) {
				statemap[inumber] = USTATE;
				zapino(dp);
				inodirty();
			}
		}
	}
}
/*
 * Initialize a cylinder group.
 *
 * Builds cylinder group "cylno" of the new file system in the global
 * "acg" buffer: computes the in-cg offsets of the inode/free/cluster
 * bitmaps (UFS1 vs UFS2 layout chosen by Oflag), marks the data area
 * free while reserving metadata, accumulates the per-cg summary into
 * fscs[cylno], and writes the superblock copy, cg map, and initial
 * inode blocks to disk via wtfs().  "utime" seeds cg_time.
 */
void
initcg(int cylno, time_t utime)
{
	long blkno, start;
	uint i, j, d, dlower, dupper;
	ufs2_daddr_t cbase, dmax;
	struct ufs1_dinode *dp1;
	struct ufs2_dinode *dp2;
	struct csum *cs;

	/*
	 * Determine block bounds for cylinder group.
	 * Allow space for super block summary information in first
	 * cylinder group.
	 */
	cbase = cgbase(&sblock, cylno);
	dmax = cbase + sblock.fs_fpg;
	if (dmax > sblock.fs_size)
		dmax = sblock.fs_size;	/* last cg may be short */
	dlower = cgsblock(&sblock, cylno) - cbase;
	dupper = cgdmin(&sblock, cylno) - cbase;
	if (cylno == 0)
		dupper += howmany(sblock.fs_cssize, sblock.fs_fsize);
	cs = &fscs[cylno];
	memset(&acg, 0, sblock.fs_cgsize);
	acg.cg_time = utime;
	acg.cg_magic = CG_MAGIC;
	acg.cg_cgx = cylno;
	acg.cg_niblk = sblock.fs_ipg;
	/* UFS2 initializes at most two blocks worth of inodes up front;
	 * the rest are initialized lazily by the kernel. */
	acg.cg_initediblk = sblock.fs_ipg < 2 * INOPB(&sblock) ?
	    sblock.fs_ipg : 2 * INOPB(&sblock);
	acg.cg_ndblk = dmax - cbase;
	if (sblock.fs_contigsumsize > 0)
		acg.cg_nclusterblks = acg.cg_ndblk / sblock.fs_frag;
	/* Offset of the variable-length area within struct cg. */
	start = &acg.cg_space[0] - (u_char *)(&acg.cg_firstfield);
	if (Oflag == 2) {
		/* UFS2: bitmaps start immediately. */
		acg.cg_iusedoff = start;
	} else {
		/*
		 * UFS1: keep the old rotational-layout fields and move the
		 * "new" time/inode counters into their old_ slots.
		 */
		acg.cg_old_ncyl = sblock.fs_old_cpg;
		acg.cg_old_time = acg.cg_time;
		acg.cg_time = 0;
		acg.cg_old_niblk = acg.cg_niblk;
		acg.cg_niblk = 0;
		acg.cg_initediblk = 0;
		acg.cg_old_btotoff = start;
		acg.cg_old_boff = acg.cg_old_btotoff +
		    sblock.fs_old_cpg * sizeof(int32_t);
		acg.cg_iusedoff = acg.cg_old_boff +
		    sblock.fs_old_cpg * sizeof(u_int16_t);
	}
	acg.cg_freeoff = acg.cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT);
	acg.cg_nextfreeoff = acg.cg_freeoff + howmany(sblock.fs_fpg, CHAR_BIT);
	if (sblock.fs_contigsumsize > 0) {
		/* Cluster summary array plus cluster free map follow. */
		acg.cg_clustersumoff =
		    roundup(acg.cg_nextfreeoff, sizeof(u_int32_t));
		/* The summary array is indexed 0..contigsumsize, so back
		 * up one slot before laying it out. */
		acg.cg_clustersumoff -= sizeof(u_int32_t);
		acg.cg_clusteroff = acg.cg_clustersumoff +
		    (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t);
		acg.cg_nextfreeoff = acg.cg_clusteroff +
		    howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT);
	}
	if (acg.cg_nextfreeoff > (unsigned)sblock.fs_cgsize) {
		printf("Panic: cylinder group too big\n");
		exit(37);
	}
	acg.cg_cs.cs_nifree += sblock.fs_ipg;
	if (cylno == 0)
		/* Reserve the pre-root inodes (0 and 1). */
		for (i = 0; i < (long)ROOTINO; i++) {
			setbit(cg_inosused(&acg), i);
			acg.cg_cs.cs_nifree--;
		}
	if (cylno > 0) {
		/*
		 * In cylno 0, beginning space is reserved
		 * for boot and super blocks.  In every other cg that
		 * space is ordinary free data.
		 */
		for (d = 0; d < dlower; d += sblock.fs_frag) {
			blkno = d / sblock.fs_frag;
			setblock(&sblock, cg_blksfree(&acg), blkno);
			if (sblock.fs_contigsumsize > 0)
				setbit(cg_clustersfree(&acg), blkno);
			acg.cg_cs.cs_nbfree++;
		}
	}
	/* If the metadata area ends mid-block, free the leftover frags. */
	if ((i = dupper % sblock.fs_frag)) {
		acg.cg_frsum[sblock.fs_frag - i]++;
		for (d = dupper + sblock.fs_frag - i; dupper < d; dupper++) {
			setbit(cg_blksfree(&acg), dupper);
			acg.cg_cs.cs_nffree++;
		}
	}
	/* Free all whole blocks in the data area. */
	for (d = dupper; d + sblock.fs_frag <= acg.cg_ndblk;
	     d += sblock.fs_frag) {
		blkno = d / sblock.fs_frag;
		setblock(&sblock, cg_blksfree(&acg), blkno);
		if (sblock.fs_contigsumsize > 0)
			setbit(cg_clustersfree(&acg), blkno);
		acg.cg_cs.cs_nbfree++;
	}
	/* And any trailing partial block at the end of the cg. */
	if (d < acg.cg_ndblk) {
		acg.cg_frsum[acg.cg_ndblk - d]++;
		for (; d < acg.cg_ndblk; d++) {
			setbit(cg_blksfree(&acg), d);
			acg.cg_cs.cs_nffree++;
		}
	}
	if (sblock.fs_contigsumsize > 0) {
		/*
		 * Build the cluster summary: count runs of free blocks
		 * in the cluster map, clamped to contigsumsize.
		 */
		int32_t *sump = cg_clustersum(&acg);
		u_char *mapp = cg_clustersfree(&acg);
		int map = *mapp++;
		int bit = 1;
		int run = 0;

		for (i = 0; i < acg.cg_nclusterblks; i++) {
			if ((map & bit) != 0)
				run++;
			else if (run != 0) {
				if (run > sblock.fs_contigsumsize)
					run = sblock.fs_contigsumsize;
				sump[run]++;
				run = 0;
			}
			if ((i & (CHAR_BIT - 1)) != CHAR_BIT - 1)
				bit <<= 1;
			else {
				map = *mapp++;
				bit = 1;
			}
		}
		if (run != 0) {
			if (run > sblock.fs_contigsumsize)
				run = sblock.fs_contigsumsize;
			sump[run]++;
		}
	}
	*cs = acg.cg_cs;
	/*
	 * Write out the duplicate super block, the cylinder group map
	 * and two blocks worth of inodes in a single write.
	 */
	start = sblock.fs_bsize > SBLOCKSIZE ? sblock.fs_bsize : SBLOCKSIZE;
	bcopy((char *)&acg, &iobuf[start], sblock.fs_cgsize);
	start += sblock.fs_bsize;
	dp1 = (struct ufs1_dinode *)(&iobuf[start]);
	dp2 = (struct ufs2_dinode *)(&iobuf[start]);
	/* Randomize the generation numbers of the pre-initialized inodes
	 * (guards against stale NFS file handles). */
	for (i = 0; i < acg.cg_initediblk; i++) {
		if (sblock.fs_magic == FS_UFS1_MAGIC) {
			dp1->di_gen = newfs_random();
			dp1++;
		} else {
			dp2->di_gen = newfs_random();
			dp2++;
		}
	}
	wtfs(fsbtodb(&sblock, cgsblock(&sblock, cylno)), iobufsize, iobuf);
	/*
	 * For the old file system, we have to initialize all the inodes.
	 */
	if (Oflag == 1) {
		for (i = 2 * sblock.fs_frag;
		     i < sblock.fs_ipg / INOPF(&sblock);
		     i += sblock.fs_frag) {
			dp1 = (struct ufs1_dinode *)(&iobuf[start]);
			for (j = 0; j < INOPB(&sblock); j++) {
				dp1->di_gen = newfs_random();
				dp1++;
			}
			wtfs(fsbtodb(&sblock, cgimin(&sblock, cylno) + i),
			    sblock.fs_bsize, &iobuf[start]);
		}
	}
}
/*
 * Find a suitable location for the journal in the filesystem.
 *
 * Our strategy here is to look for a contiguous block of free space
 * at least "logsize" bytes (plus room for any indirect blocks).
 * We start at the middle of the filesystem and check each cylinder
 * group working outwards.  If "logsize" is not available as a
 * single contiguous chunk, then return the address and size of the
 * largest chunk found.
 *
 * On success *addr/*indir_addr/*size describe the chosen extent
 * (indirect blocks placed at the start, data blocks after); on
 * failure all three outputs are set to 0.
 *
 * XXX
 * At what stage does the search fail?  Is accepting the largest space
 * found when it is at least a quarter of the requested space reasonable?
 * If the search fails entirely, a block address of "0" indicates this.
 */
void
wapbl_find_log_start(struct mount *mp, struct vnode *vp, off_t logsize,
    daddr_t *addr, daddr_t *indir_addr, size_t *size)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs = ump->um_fs;
	struct vnode *devvp = ump->um_devvp;
	struct cg *cgp;
	struct buf *bp;
	uint8_t *blksfree;
	daddr_t blkno, best_addr, start_addr;
	daddr_t desired_blks, min_desired_blks;
	daddr_t freeblks, best_blks;
	int bpcg, cg, error, fixedsize, indir_blks, n, s;
#ifdef FFS_EI
	const int needswap = UFS_FSNEEDSWAP(fs);
#endif

	if (logsize == 0) {
		fixedsize = 0;	/* We can adjust the size if tight */
		/* Default: scale the log to the file system size, then
		 * clamp to the WAPBL min/max. */
		logsize = lfragtosize(fs, fs->fs_dsize) /
		    UFS_WAPBL_JOURNAL_SCALE;
		DPRINTF("suggested log size = %lld\n", logsize);
		logsize = max(logsize, UFS_WAPBL_MIN_JOURNAL_SIZE);
		logsize = min(logsize, UFS_WAPBL_MAX_JOURNAL_SIZE);
		DPRINTF("adjusted log size = %lld\n", logsize);
	} else {
		fixedsize = 1;
		DPRINTF("fixed log size = %lld\n", logsize);
	}

	desired_blks = logsize / fs->fs_bsize;
	DPRINTF("desired blocks = %lld\n", desired_blks);

	/* add in number of indirect blocks needed */
	indir_blks = 0;
	if (desired_blks >= NDADDR) {
		struct indir indirs[NIADDR + 2];
		int num;

		error = ufs_getlbns(vp, desired_blks, indirs, &num);
		if (error) {
			printf("%s: ufs_getlbns failed, error %d!\n",
			    __func__, error);
			goto bad;
		}

		switch (num) {
		case 2:
			indir_blks = 1;		/* 1st level indirect */
			break;
		case 3:
			indir_blks = 1 +	/* 1st level indirect */
			    1 +			/* 2nd level indirect */
			    indirs[1].in_off + 1; /* extra 1st level indirect */
			break;
		default:
			printf("%s: unexpected numlevels %d from ufs_getlbns\n",
			    __func__, num);
			*size = 0;
			goto bad;
		}
		desired_blks += indir_blks;
	}
	DPRINTF("desired blocks = %lld (including indirect)\n",
	    desired_blks);

	/*
	 * If a specific size wasn't requested, allow for a smaller log
	 * if we're really tight for space...
	 */
	min_desired_blks = desired_blks;
	if (!fixedsize)
		min_desired_blks = desired_blks / 4;

	/* Look at number of blocks per CG.  If it's too small, bail early. */
	bpcg = fragstoblks(fs, fs->fs_fpg);
	if (min_desired_blks > bpcg) {
		printf("ffs_wapbl: cylinder group size of %lld MB "
		    " is not big enough for journal\n",
		    lblktosize(fs, bpcg) / (1024 * 1024));
		goto bad;
	}

	/*
	 * Start with the middle cylinder group, and search outwards in
	 * both directions until we either find the requested log size
	 * or reach the start/end of the file system.  If we reach the
	 * start/end without finding enough space for the full requested
	 * log size, use the largest extent found if it is large enough
	 * to satisfy the our minimum size.
	 *
	 * XXX
	 * Can we just use the cluster contigsum stuff (esp on UFS2)
	 * here to simplify this search code?
	 */
	best_addr = 0;
	best_blks = 0;
	/* cg sequence: middle, middle-1, middle+1, middle-2, ... */
	for (cg = fs->fs_ncg / 2, s = 0, n = 1;
	    best_blks < desired_blks && cg >= 0 && cg < fs->fs_ncg;
	    s++, n = -n, cg += n * s) {
		DPRINTF("check cg %d of %d\n", cg, fs->fs_ncg);
		error = bread(devvp, fsbtodb(fs, cgtod(fs, cg)),
		    fs->fs_cgsize, &bp);
		if (error) {
			/* Unreadable cg: just try the next one. */
			continue;
		}
		cgp = (struct cg *)bp->b_data;
		if (!cg_chkmagic(cgp)) {
			brelse(bp);
			continue;
		}

		blksfree = cg_blksfree(cgp);

		for (blkno = 0; blkno < bpcg;) {
			/* look for next free block */
			/* XXX use scanc() and fragtbl[] here? */
			for (; blkno < bpcg - min_desired_blks; blkno++)
				if (ffs_isblock(fs, blksfree, blkno))
					break;

			/* past end of search space in this CG? */
			if (blkno >= bpcg - min_desired_blks)
				break;

			/* count how many free blocks in this extent */
			start_addr = blkno;
			for (freeblks = 0; blkno < bpcg; blkno++, freeblks++)
				if (!ffs_isblock(fs, blksfree, blkno))
					break;

			if (freeblks > best_blks) {
				best_blks = freeblks;
				best_addr = blkstofrags(fs, start_addr) +
				    cgbase(fs, cg);

				if (freeblks >= desired_blks) {
					DPRINTF("found len %lld"
					    " at offset %lld in gc\n",
					    freeblks, start_addr);
					break;
				}
			}
		}
		brelse(bp);
	}

	DPRINTF("best found len = %lld, wanted %lld"
	    " at addr %lld\n", best_blks, desired_blks, best_addr);

	if (best_blks < min_desired_blks) {
		*addr = 0;
		*indir_addr = 0;
	} else {
		/* put indirect blocks at start, and data blocks after */
		*addr = best_addr + blkstofrags(fs, indir_blks);
		*indir_addr = best_addr;
	}
	*size = min(desired_blks, best_blks) - indir_blks;
	return;
bad:
	*addr = 0;
	*indir_addr = 0;
	*size = 0;
	return;
}
/* * Initialize a cylinder group. */ static void initcg(int cylno, time_t utime, const fsinfo_t *fsopts) { daddr_t cbase, dmax; int32_t i, j, d, dlower, dupper, blkno; struct ufs1_dinode *dp1; struct ufs2_dinode *dp2; int start; /* * Determine block bounds for cylinder group. * Allow space for super block summary information in first * cylinder group. */ cbase = cgbase(&sblock, cylno); dmax = cbase + sblock.fs_fpg; if (dmax > sblock.fs_size) dmax = sblock.fs_size; dlower = cgsblock(&sblock, cylno) - cbase; dupper = cgdmin(&sblock, cylno) - cbase; if (cylno == 0) dupper += howmany(sblock.fs_cssize, sblock.fs_fsize); memset(&acg, 0, sblock.fs_cgsize); acg.cg_time = utime; acg.cg_magic = CG_MAGIC; acg.cg_cgx = cylno; acg.cg_niblk = sblock.fs_ipg; acg.cg_initediblk = MIN(sblock.fs_ipg, 2 * INOPB(&sblock)); acg.cg_ndblk = dmax - cbase; if (sblock.fs_contigsumsize > 0) acg.cg_nclusterblks = acg.cg_ndblk >> sblock.fs_fragshift; start = &acg.cg_space[0] - (u_char *)(&acg.cg_firstfield); if (Oflag == 2) { acg.cg_iusedoff = start; } else { if (cylno == sblock.fs_ncg - 1) acg.cg_old_ncyl = howmany(acg.cg_ndblk, sblock.fs_fpg / sblock.fs_old_cpg); else acg.cg_old_ncyl = sblock.fs_old_cpg; acg.cg_old_time = acg.cg_time; acg.cg_time = 0; acg.cg_old_niblk = acg.cg_niblk; acg.cg_niblk = 0; acg.cg_initediblk = 0; acg.cg_old_btotoff = start; acg.cg_old_boff = acg.cg_old_btotoff + sblock.fs_old_cpg * sizeof(int32_t); acg.cg_iusedoff = acg.cg_old_boff + sblock.fs_old_cpg * sizeof(u_int16_t); } acg.cg_freeoff = acg.cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT); if (sblock.fs_contigsumsize <= 0) { acg.cg_nextfreeoff = acg.cg_freeoff + howmany(sblock.fs_fpg, CHAR_BIT); } else { acg.cg_clustersumoff = acg.cg_freeoff + howmany(sblock.fs_fpg, CHAR_BIT) - sizeof(int32_t); acg.cg_clustersumoff = roundup(acg.cg_clustersumoff, sizeof(int32_t)); acg.cg_clusteroff = acg.cg_clustersumoff + (sblock.fs_contigsumsize + 1) * sizeof(int32_t); acg.cg_nextfreeoff = acg.cg_clusteroff + 
howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT); } if (acg.cg_nextfreeoff > sblock.fs_cgsize) { printf("Panic: cylinder group too big\n"); exit(37); } acg.cg_cs.cs_nifree += sblock.fs_ipg; if (cylno == 0) for (i = 0; i < ROOTINO; i++) { setbit(cg_inosused_swap(&acg, 0), i); acg.cg_cs.cs_nifree--; } if (cylno > 0) { /* * In cylno 0, beginning space is reserved * for boot and super blocks. */ for (d = 0, blkno = 0; d < dlower;) { ffs_setblock(&sblock, cg_blksfree_swap(&acg, 0), blkno); if (sblock.fs_contigsumsize > 0) setbit(cg_clustersfree_swap(&acg, 0), blkno); acg.cg_cs.cs_nbfree++; d += sblock.fs_frag; blkno++; } } if ((i = (dupper & (sblock.fs_frag - 1))) != 0) { acg.cg_frsum[sblock.fs_frag - i]++; for (d = dupper + sblock.fs_frag - i; dupper < d; dupper++) { setbit(cg_blksfree_swap(&acg, 0), dupper); acg.cg_cs.cs_nffree++; } } for (d = dupper, blkno = dupper >> sblock.fs_fragshift; d + sblock.fs_frag <= acg.cg_ndblk; ) {
/*
 * Phase 5: check and repair cylinder group summary information.
 *
 * Rebuilds each cylinder group's bookkeeping from scratch in "newcg"
 * (inode-used map from inoinfo() state, free-frag/block maps from the
 * global block map built in phase 1, frag and cluster summaries), then
 * compares against the on-disk cg and the superblock csum array,
 * offering to correct any mismatch.  With cvtlevel >= 3 it may also
 * create, expand, or delete cluster maps (setting rewritecg so whole
 * groups are rewritten).  When running in background on a snapshot
 * (cursnapshot != 0) discrepancies are instead pushed to the live
 * kernel through the adj* sysctls.
 */
void
pass5(void)
{
	int c, i, j, blk, frags, basesize, mapsize;
	int inomapsize, blkmapsize;
	struct fs *fs = &sblock;
	ufs2_daddr_t d, dbase, dmax, start;
	int rewritecg = 0;
	struct csum *cs;
	struct csum_total cstotal;
	struct inodesc idesc[3];
	char buf[MAXBSIZE];
	struct cg *cg, *newcg = (struct cg *)buf;
	struct bufarea *cgbp;

	/* The whiteout inode is never considered allocated. */
	inoinfo(WINO)->ino_state = USTATE;
	memset(newcg, 0, (size_t)fs->fs_cgsize);
	newcg->cg_niblk = fs->fs_ipg;
	if (cvtlevel >= 3) {
		/* Conversion: add/expand/remove cluster maps as needed. */
		if (fs->fs_maxcontig < 2 && fs->fs_contigsumsize > 0) {
			if (preen)
				pwarn("DELETING CLUSTERING MAPS\n");
			if (preen || reply("DELETE CLUSTERING MAPS")) {
				fs->fs_contigsumsize = 0;
				rewritecg = 1;
				sbdirty();
			}
		}
		if (fs->fs_maxcontig > 1) {
			const char *doit = 0;

			if (fs->fs_contigsumsize < 1) {
				doit = "CREAT";
			} else if (fs->fs_contigsumsize < fs->fs_maxcontig &&
				   fs->fs_contigsumsize < FS_MAXCONTIG) {
				doit = "EXPAND";
			}
			if (doit) {
				i = fs->fs_contigsumsize;
				fs->fs_contigsumsize =
				    MIN(fs->fs_maxcontig, FS_MAXCONTIG);
				/* Bail out (and restore) if the bigger cg
				 * no longer fits in a block. */
				if (CGSIZE(fs) > (u_int)fs->fs_bsize) {
					pwarn("CANNOT %s CLUSTER MAPS\n", doit);
					fs->fs_contigsumsize = i;
				} else if (preen ||
				    reply("CREATE CLUSTER MAPS")) {
					if (preen)
						pwarn("%sING CLUSTER MAPS\n",
						    doit);
					fs->fs_cgsize =
					    fragroundup(fs, CGSIZE(fs));
					rewritecg = 1;
					sbdirty();
				}
			}
		}
	}
	/*
	 * Lay out the offsets of the maps within newcg, mirroring the
	 * layout rules used when the cg was created.
	 */
	basesize = &newcg->cg_space[0] - (u_char *)(&newcg->cg_firstfield);
	if (sblock.fs_magic == FS_UFS2_MAGIC) {
		newcg->cg_iusedoff = basesize;
	} else {
		/*
		 * We reserve the space for the old rotation summary
		 * tables for the benefit of old kernels, but do not
		 * maintain them in modern kernels. In time, they can
		 * go away.
		 */
		newcg->cg_old_btotoff = basesize;
		newcg->cg_old_boff = newcg->cg_old_btotoff +
		    fs->fs_old_cpg * sizeof(int32_t);
		newcg->cg_iusedoff = newcg->cg_old_boff +
		    fs->fs_old_cpg * fs->fs_old_nrpos * sizeof(u_int16_t);
		memset(&newcg->cg_space[0], 0, newcg->cg_iusedoff - basesize);
	}
	inomapsize = howmany(fs->fs_ipg, CHAR_BIT);
	newcg->cg_freeoff = newcg->cg_iusedoff + inomapsize;
	blkmapsize = howmany(fs->fs_fpg, CHAR_BIT);
	newcg->cg_nextfreeoff = newcg->cg_freeoff + blkmapsize;
	if (fs->fs_contigsumsize > 0) {
		newcg->cg_clustersumoff = newcg->cg_nextfreeoff -
		    sizeof(u_int32_t);
		newcg->cg_clustersumoff =
		    roundup(newcg->cg_clustersumoff, sizeof(u_int32_t));
		newcg->cg_clusteroff = newcg->cg_clustersumoff +
		    (fs->fs_contigsumsize + 1) * sizeof(u_int32_t);
		newcg->cg_nextfreeoff = newcg->cg_clusteroff +
		    howmany(fragstoblks(fs, fs->fs_fpg), CHAR_BIT);
	}
	newcg->cg_magic = CG_MAGIC;
	mapsize = newcg->cg_nextfreeoff - newcg->cg_iusedoff;
	memset(&idesc[0], 0, sizeof idesc);
	for (i = 0; i < 3; i++)
		idesc[i].id_type = ADDR;
	memset(&cstotal, 0, sizeof(struct csum_total));
	/* Frags past fs_size in the final (short) block are never free. */
	dmax = blknum(fs, fs->fs_size + fs->fs_frag - 1);
	for (d = fs->fs_size; d < dmax; d++)
		setbmap(d);
	for (c = 0; c < fs->fs_ncg; c++) {
		/* Progress reporting on SIGINFO/SIGALRM. */
		if (got_siginfo) {
			printf("%s: phase 5: cyl group %d of %d (%d%%)\n",
			    cdevname, c, sblock.fs_ncg,
			    c * 100 / sblock.fs_ncg);
			got_siginfo = 0;
		}
		if (got_sigalarm) {
			setproctitle("%s p5 %d%%", cdevname,
			    c * 100 / sblock.fs_ncg);
			got_sigalarm = 0;
		}
		cgbp = cgget(c);
		cg = cgbp->b_un.b_cg;
		if (!cg_chkmagic(cg))
			pfatal("CG %d: BAD MAGIC NUMBER\n", c);
		/* Carry over fields pass 5 does not recompute. */
		newcg->cg_time = cg->cg_time;
		newcg->cg_old_time = cg->cg_old_time;
		newcg->cg_unrefs = cg->cg_unrefs;
		newcg->cg_cgx = c;
		dbase = cgbase(fs, c);
		dmax = dbase + fs->fs_fpg;
		if (dmax > fs->fs_size)
			dmax = fs->fs_size;
		newcg->cg_ndblk = dmax - dbase;
		if (fs->fs_magic == FS_UFS1_MAGIC) {
			if (c == fs->fs_ncg - 1)
				newcg->cg_old_ncyl = howmany(newcg->cg_ndblk,
				    fs->fs_fpg / fs->fs_old_cpg);
			else
				newcg->cg_old_ncyl = fs->fs_old_cpg;
			newcg->cg_old_niblk = fs->fs_ipg;
			newcg->cg_niblk = 0;
		}
		if (fs->fs_contigsumsize > 0)
			newcg->cg_nclusterblks = newcg->cg_ndblk / fs->fs_frag;
		newcg->cg_cs.cs_ndir = 0;
		newcg->cg_cs.cs_nffree = 0;
		newcg->cg_cs.cs_nbfree = 0;
		newcg->cg_cs.cs_nifree = fs->fs_ipg;
		/* Keep the allocation rotors only if still in range. */
		if (cg->cg_rotor >= 0 && cg->cg_rotor < newcg->cg_ndblk)
			newcg->cg_rotor = cg->cg_rotor;
		else
			newcg->cg_rotor = 0;
		if (cg->cg_frotor >= 0 && cg->cg_frotor < newcg->cg_ndblk)
			newcg->cg_frotor = cg->cg_frotor;
		else
			newcg->cg_frotor = 0;
		if (cg->cg_irotor >= 0 && cg->cg_irotor < fs->fs_ipg)
			newcg->cg_irotor = cg->cg_irotor;
		else
			newcg->cg_irotor = 0;
		if (fs->fs_magic == FS_UFS1_MAGIC) {
			newcg->cg_initediblk = 0;
		} else {
			if ((unsigned)cg->cg_initediblk > fs->fs_ipg)
				newcg->cg_initediblk = fs->fs_ipg;
			else
				newcg->cg_initediblk = cg->cg_initediblk;
		}
		memset(&newcg->cg_frsum[0], 0, sizeof newcg->cg_frsum);
		memset(cg_inosused(newcg), 0, (size_t)(mapsize));
		/* Rebuild the inode-used map from the phase 1-4 states. */
		j = fs->fs_ipg * c;
		for (i = 0; i < inostathead[c].il_numalloced; j++, i++) {
			switch (inoinfo(j)->ino_state) {

			case USTATE:
				break;

			case DSTATE:
			case DCLEAR:
			case DFOUND:
			case DZLINK:
				newcg->cg_cs.cs_ndir++;
				/* FALLTHROUGH */

			case FSTATE:
			case FCLEAR:
			case FZLINK:
				newcg->cg_cs.cs_nifree--;
				setbit(cg_inosused(newcg), i);
				break;

			default:
				if (j < (int)ROOTINO)
					break;
				errx(EEXIT, "BAD STATE %d FOR INODE I=%d",
				    inoinfo(j)->ino_state, j);
			}
		}
		if (c == 0)
			/* Pre-root inodes are always allocated. */
			for (i = 0; i < (int)ROOTINO; i++) {
				setbit(cg_inosused(newcg), i);
				newcg->cg_cs.cs_nifree--;
			}
		/*
		 * Rebuild the free-frag map from the global block map;
		 * with -E, also track runs of free frags so they can be
		 * erased/trimmed via clear_blocks().
		 */
		start = -1;
		for (i = 0, d = dbase;
		     d < dmax;
		     d += fs->fs_frag, i += fs->fs_frag) {
			frags = 0;
			for (j = 0; j < fs->fs_frag; j++) {
				if (testbmap(d + j)) {
					if (Eflag && start != -1) {
						clear_blocks(start, d + j - 1);
						start = -1;
					}
					continue;
				}
				if (start == -1)
					start = d + j;
				setbit(cg_blksfree(newcg), i + j);
				frags++;
			}
			if (frags == fs->fs_frag) {
				newcg->cg_cs.cs_nbfree++;
				if (fs->fs_contigsumsize > 0)
					setbit(cg_clustersfree(newcg),
					    i / fs->fs_frag);
			} else if (frags > 0) {
				newcg->cg_cs.cs_nffree += frags;
				blk = blkmap(fs, cg_blksfree(newcg), i);
				ffs_fragacct(fs, blk, newcg->cg_frsum, 1);
			}
		}
		if (Eflag && start != -1)
			clear_blocks(start, d - 1);
		if (fs->fs_contigsumsize > 0) {
			/* Recompute the cluster summary: histogram of free-
			 * block run lengths, clamped to contigsumsize. */
			int32_t *sump = cg_clustersum(newcg);
			u_char *mapp = cg_clustersfree(newcg);
			int map = *mapp++;
			int bit = 1;
			int run = 0;

			for (i = 0; i < newcg->cg_nclusterblks; i++) {
				if ((map & bit) != 0) {
					run++;
				} else if (run != 0) {
					if (run > fs->fs_contigsumsize)
						run = fs->fs_contigsumsize;
					sump[run]++;
					run = 0;
				}
				if ((i & (CHAR_BIT - 1)) != (CHAR_BIT - 1)) {
					bit <<= 1;
				} else {
					map = *mapp++;
					bit = 1;
				}
			}
			if (run != 0) {
				if (run > fs->fs_contigsumsize)
					run = fs->fs_contigsumsize;
				sump[run]++;
			}
		}
		/*
		 * In background mode trust the on-disk per-cg counts for
		 * the running total (the snapshot may legitimately differ
		 * from our recomputation).
		 */
		if (bkgrdflag != 0) {
			cstotal.cs_nffree += cg->cg_cs.cs_nffree;
			cstotal.cs_nbfree += cg->cg_cs.cs_nbfree;
			cstotal.cs_nifree += cg->cg_cs.cs_nifree;
			cstotal.cs_ndir += cg->cg_cs.cs_ndir;
		} else {
			cstotal.cs_nffree += newcg->cg_cs.cs_nffree;
			cstotal.cs_nbfree += newcg->cg_cs.cs_nbfree;
			cstotal.cs_nifree += newcg->cg_cs.cs_nifree;
			cstotal.cs_ndir += newcg->cg_cs.cs_ndir;
		}
		/* Compare rebuilt state against disk and offer fixes. */
		cs = &fs->fs_cs(fs, c);
		if (cursnapshot == 0 &&
		    memcmp(&newcg->cg_cs, cs, sizeof *cs) != 0 &&
		    dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) {
			memmove(cs, &newcg->cg_cs, sizeof *cs);
			sbdirty();
		}
		if (rewritecg) {
			memmove(cg, newcg, (size_t)fs->fs_cgsize);
			dirty(cgbp);
			continue;
		}
		if (cursnapshot == 0 &&
		    memcmp(newcg, cg, basesize) != 0 &&
		    dofix(&idesc[2], "SUMMARY INFORMATION BAD")) {
			memmove(cg, newcg, (size_t)basesize);
			dirty(cgbp);
		}
		if (bkgrdflag != 0 || usedsoftdep || debug)
			update_maps(cg, newcg, bkgrdflag);
		if (cursnapshot == 0 &&
		    memcmp(cg_inosused(newcg), cg_inosused(cg), mapsize) != 0 &&
		    dofix(&idesc[1], "BLK(S) MISSING IN BIT MAPS")) {
			memmove(cg_inosused(cg), cg_inosused(newcg),
			    (size_t)mapsize);
			dirty(cgbp);
		}
	}
	if (cursnapshot == 0 &&
	    memcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal) != 0 &&
	    dofix(&idesc[0], "SUMMARY BLK COUNT(S) WRONG IN SUPERBLK")) {
		memmove(&fs->fs_cstotal, &cstotal, sizeof cstotal);
		fs->fs_ronly = 0;
		fs->fs_fmod = 0;
		sbdirty();
	}
	/*
	 * When doing background fsck on a snapshot, figure out whether
	 * the superblock summary is inaccurate and correct it when
	 * necessary: apply each delta to the live file system via the
	 * adj* sysctls rather than writing the disk directly.
	 */
	if (cursnapshot != 0) {
		cmd.size = 1;
		cmd.value = cstotal.cs_ndir - fs->fs_cstotal.cs_ndir;
		if (cmd.value != 0) {
			if (debug)
				printf("adjndir by %+" PRIi64 "\n", cmd.value);
			if (bkgrdsumadj == 0 || sysctl(adjndir, MIBSIZE, 0, 0,
			    &cmd, sizeof cmd) == -1)
				rwerror("ADJUST NUMBER OF DIRECTORIES",
				    cmd.value);
		}
		cmd.value = cstotal.cs_nbfree - fs->fs_cstotal.cs_nbfree;
		if (cmd.value != 0) {
			if (debug)
				printf("adjnbfree by %+" PRIi64 "\n",
				    cmd.value);
			if (bkgrdsumadj == 0 || sysctl(adjnbfree, MIBSIZE, 0, 0,
			    &cmd, sizeof cmd) == -1)
				rwerror("ADJUST NUMBER OF FREE BLOCKS",
				    cmd.value);
		}
		cmd.value = cstotal.cs_nifree - fs->fs_cstotal.cs_nifree;
		if (cmd.value != 0) {
			if (debug)
				printf("adjnifree by %+" PRIi64 "\n",
				    cmd.value);
			if (bkgrdsumadj == 0 || sysctl(adjnifree, MIBSIZE, 0, 0,
			    &cmd, sizeof cmd) == -1)
				rwerror("ADJUST NUMBER OF FREE INODES",
				    cmd.value);
		}
		cmd.value = cstotal.cs_nffree - fs->fs_cstotal.cs_nffree;
		if (cmd.value != 0) {
			if (debug)
				printf("adjnffree by %+" PRIi64 "\n",
				    cmd.value);
			if (bkgrdsumadj == 0 || sysctl(adjnffree, MIBSIZE, 0, 0,
			    &cmd, sizeof cmd) == -1)
				rwerror("ADJUST NUMBER OF FREE FRAGS",
				    cmd.value);
		}
		cmd.value = cstotal.cs_numclusters -
		    fs->fs_cstotal.cs_numclusters;
		if (cmd.value != 0) {
			if (debug)
				printf("adjnumclusters by %+" PRIi64 "\n",
				    cmd.value);
			if (bkgrdsumadj == 0 || sysctl(adjnumclusters, MIBSIZE,
			    0, 0, &cmd, sizeof cmd) == -1)
				rwerror("ADJUST NUMBER OF FREE CLUSTERS",
				    cmd.value);
		}
	}
}