int
chkuse(daddr_t blkno, int cnt)
{
    int cg;
    daddr_t fsbn, bn;

    fsbn = dbtofsb(fs, blkno);
    if ((unsigned)(fsbn + cnt) > fs->fs_size) {
        printf("block %ld out of range of file system\n", (long)blkno);
        return (1);
    }
    cg = dtog(fs, fsbn);
    if (fsbn < cgdmin(fs, cg)) {
        if (cg == 0 || (fsbn + cnt) > cgsblock(fs, cg)) {
            printf("block %ld in non-data area: cannot attach\n",
                (long)blkno);
            return (1);
        }
    } else {
        if ((fsbn + cnt) > cgbase(fs, cg + 1)) {
            printf("block %ld in non-data area: cannot attach\n",
                (long)blkno);
            return (1);
        }
    }
    if (cgread1(&disk, cg) != 1) {
        fprintf(stderr, "cg %d: could not be read\n", cg);
        errs++;
        return (1);
    }
    if (!cg_chkmagic(&acg)) {
        fprintf(stderr, "cg %d: bad magic number\n", cg);
        errs++;
        return (1);
    }
    bn = dtogd(fs, fsbn);
    if (isclr(cg_blksfree(&acg), bn))
        printf("Warning: sector %ld is in use\n", (long)blkno);
    return (0);
}
/*
 * Change the number of unreferenced inodes.
 */
static int
ufs_gjournal_modref(struct vnode *vp, int count)
{
    struct cg *cgp;
    struct buf *bp;
    ufs2_daddr_t cgbno;
    int error, cg;
    struct cdev *dev;
    struct inode *ip;
    struct ufsmount *ump;
    struct fs *fs;
    struct vnode *devvp;
    ino_t ino;

    ip = VTOI(vp);
    ump = VFSTOUFS(vp->v_mount);
    fs = ump->um_fs;
    devvp = ump->um_devvp;
    ino = ip->i_number;
    cg = ino_to_cg(fs, ino);
    if (devvp->v_type == VREG) {
        /* devvp is a snapshot */
        dev = VFSTOUFS(devvp->v_mount)->um_devvp->v_rdev;
        cgbno = fragstoblks(fs, cgtod(fs, cg));
    } else if (devvp->v_type == VCHR) {
        /* devvp is a normal disk device */
        dev = devvp->v_rdev;
        cgbno = fsbtodb(fs, cgtod(fs, cg));
    } else {
        bp = NULL;
        return (EIO);
    }
    if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg)
        panic("ufs_gjournal_modref: range: dev = %s, ino = %lu, fs = %s",
            devtoname(dev), (u_long)ino, fs->fs_fsmnt);
    if ((error = bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp))) {
        brelse(bp);
        return (error);
    }
    cgp = (struct cg *)bp->b_data;
    if (!cg_chkmagic(cgp)) {
        brelse(bp);
        return (0);
    }
    bp->b_xflags |= BX_BKGRDWRITE;
    cgp->cg_unrefs += count;
    UFS_LOCK(ump);
    fs->fs_unrefs += count;
    fs->fs_fmod = 1;
    ACTIVECLEAR(fs, cg);
    UFS_UNLOCK(ump);
    bdwrite(bp);
    return (0);
}
/*
 * allocate an unused inode
 */
ufs1_ino_t
allocino(ufs1_ino_t request, int type)
{
    ufs1_ino_t ino;
    struct ufs1_dinode *dp;
    struct cg *cgp = &cgrp;
    int cg;

    if (request == 0)
        request = ROOTINO;
    else if (inoinfo(request)->ino_state != USTATE)
        return (0);
    for (ino = request; ino < maxino; ino++)
        if (inoinfo(ino)->ino_state == USTATE)
            break;
    if (ino == maxino)
        return (0);
    cg = ino_to_cg(&sblock, ino);
    getblk(&cgblk, cgtod(&sblock, cg), sblock.fs_cgsize);
    if (!cg_chkmagic(cgp))
        pfatal("CG %d: BAD MAGIC NUMBER\n", cg);
    setbit(cg_inosused(cgp), ino % sblock.fs_ipg);
    cgp->cg_cs.cs_nifree--;
    switch (type & IFMT) {
    case IFDIR:
        inoinfo(ino)->ino_state = DSTATE;
        cgp->cg_cs.cs_ndir++;
        break;
    case IFREG:
    case IFLNK:
        inoinfo(ino)->ino_state = FSTATE;
        break;
    default:
        return (0);
    }
    cgdirty();
    dp = ginode(ino);
    dp->di_db[0] = allocblk((long)1);
    if (dp->di_db[0] == 0) {
        inoinfo(ino)->ino_state = USTATE;
        return (0);
    }
    dp->di_mode = type;
    dp->di_flags = 0;
    dp->di_atime = time(NULL);
    dp->di_mtime = dp->di_ctime = dp->di_atime;
    dp->di_mtimensec = dp->di_ctimensec = dp->di_atimensec = 0;
    dp->di_size = sblock.fs_fsize;
    dp->di_blocks = btodb(sblock.fs_fsize);
    n_files++;
    inodirty();
    if (newinofmt)
        inoinfo(ino)->ino_type = IFTODT(type);
    return (ino);
}
static union dinode *
get_inode(int fd, struct fs *super, ino_t ino)
{
    static caddr_t ipbuf;
    static struct cg *cgp;
    static ino_t last;
    static int cg;
    struct ufs2_dinode *di2;

    if (fd < 0) {           /* flush cache */
        if (ipbuf) {
            free(ipbuf);
            ipbuf = NULL;
            if (super != NULL && super->fs_magic == FS_UFS2_MAGIC) {
                free(cgp);
                cgp = NULL;
            }
        }
        return 0;
    }

    if (!ipbuf || ino < last || ino >= last + INOCNT(super)) {
        if (super->fs_magic == FS_UFS2_MAGIC &&
            (!cgp || cg != ino_to_cg(super, ino))) {
            cg = ino_to_cg(super, ino);
            if (!cgp && !(cgp = malloc(super->fs_cgsize)))
                errx(1, "allocate cg");
            if (pread(fd, cgp, super->fs_cgsize,
                (off_t)cgtod(super, cg) << super->fs_fshift) !=
                super->fs_cgsize)
                err(1, "read cg");
            if (!cg_chkmagic(cgp))
                errx(1, "cg has bad magic");
        }
        if (!ipbuf && !(ipbuf = malloc(INOSZ(super))))
            err(1, "allocate inodes");
        last = (ino / INOCNT(super)) * INOCNT(super);
        if (lseek(fd,
            (off_t)ino_to_fsba(super, last) << super->fs_fshift,
            SEEK_SET) < 0 ||
            read(fd, ipbuf, INOSZ(super)) != INOSZ(super)) {
            err(1, "read inodes");
        }
    }

    if (super->fs_magic == FS_UFS1_MAGIC)
        return ((union dinode *)
            &((struct ufs1_dinode *)ipbuf)[ino % INOCNT(super)]);
    di2 = &((struct ufs2_dinode *)ipbuf)[ino % INOCNT(super)];
    /* If the inode is unused, it might be unallocated too, so zero it. */
    if (isclr(cg_inosused(cgp), ino % super->fs_ipg))
        memset(di2, 0, sizeof(*di2));
    return ((union dinode *)di2);
}
void
fs_mapinodes(ino_t maxino, int64_t *tapesize, int *anydirskipped)
{
    int i, cg, inosused;
    struct cg *cgp;
    ino_t ino;
    char *cp;

    if ((cgp = malloc(sblock->fs_cgsize)) == NULL)
        quit("fs_mapinodes: cannot allocate memory.\n");
    for (cg = 0; cg < sblock->fs_ncg; cg++) {
        ino = cg * sblock->fs_ipg;
        bread(fsbtodb(sblock, cgtod(sblock, cg)), (char *)cgp,
            sblock->fs_cgsize);
        if (sblock->fs_magic == FS_UFS2_MAGIC)
            inosused = cgp->cg_initediblk;
        else
            inosused = sblock->fs_ipg;
        /*
         * If we are using soft updates, then we can trust the
         * cylinder group inode allocation maps to tell us which
         * inodes are allocated. We will scan the used inode map
         * to find the inodes that are really in use, and then
         * read only those inodes in from disk.
         */
        if (sblock->fs_flags & FS_DOSOFTDEP) {
            if (!cg_chkmagic(cgp))
                quit("mapfiles: cg %d: bad magic number\n", cg);
            cp = &cg_inosused(cgp)[(inosused - 1) / CHAR_BIT];
            for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) {
                if (*cp == 0)
                    continue;
                for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) {
                    if (*cp & i)
                        break;
                    inosused--;
                }
                break;
            }
            if (inosused <= 0)
                continue;
        }
        for (i = 0; i < inosused; i++, ino++) {
            if (ino < ROOTINO)
                continue;
            mapfileino(ino, tapesize, anydirskipped);
        }
    }
    free(cgp);
}
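/*
 * Editor's aside: a minimal standalone sketch of the soft-updates scan
 * used in fs_mapinodes() above (and repeated in mapfiles() and
 * chkquota() below).  Starting from the last byte of the inode-used
 * bitmap, it trims "inosused" past trailing free inodes so only inodes
 * up to the highest allocated one are read from disk.  It assumes the
 * setbit()/isset() layout from <sys/param.h> (LSB of each byte is the
 * lowest-numbered inode); trim_inosused() and the sample map are
 * hypothetical, not part of the sources above.
 */
#include <limits.h>
#include <stdio.h>

static int
trim_inosused(const unsigned char *inosused_map, int inosused)
{
    const unsigned char *cp;
    int i;

    cp = &inosused_map[(inosused - 1) / CHAR_BIT];
    for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) {
        if (*cp == 0)
            continue;               /* whole byte free: skip 8 inodes */
        for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) {
            if (*cp & i)            /* highest allocated inode found */
                break;
            inosused--;             /* trailing free inode in this byte */
        }
        break;
    }
    return (inosused);
}

int
main(void)
{
    /* inodes 0 and 9 allocated out of 24: only 0..9 need scanning */
    unsigned char map[3] = { 0x01, 0x02, 0x00 };

    printf("%d\n", trim_inosused(map, 24));     /* prints 10 */
    return (0);
}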
/*
 * allocate a data block with the specified number of fragments
 */
ufs_daddr_t
allocblk(long frags)
{
    int i, j, k, cg, baseblk;
    struct cg *cgp = &cgrp;

    if (frags <= 0 || frags > sblock.fs_frag)
        return (0);
    for (i = 0; i < maxfsblock - sblock.fs_frag; i += sblock.fs_frag) {
        for (j = 0; j <= sblock.fs_frag - frags; j++) {
            if (testbmap(i + j))
                continue;
            for (k = 1; k < frags; k++)
                if (testbmap(i + j + k))
                    break;
            if (k < frags) {
                j += k;
                continue;
            }
            cg = dtog(&sblock, i + j);
            getblk(&cgblk, cgtod(&sblock, cg), sblock.fs_cgsize);
            if (!cg_chkmagic(cgp))
                pfatal("CG %d: BAD MAGIC NUMBER\n", cg);
            baseblk = dtogd(&sblock, i + j);
            for (k = 0; k < frags; k++) {
                setbmap(i + j + k);
                clrbit(cg_blksfree(cgp), baseblk + k);
            }
            n_blks += frags;
            if (frags == sblock.fs_frag)
                cgp->cg_cs.cs_nbfree--;
            else
                cgp->cg_cs.cs_nffree -= frags;
            cgdirty();
            return (i + j);
        }
    }
    return (0);
}
/*
 * Scan the specified file system to check quota(s) present on it.
 */
int
chkquota(const char *vfstype, const char *fsname, const char *mntpt,
    void *auxarg, pid_t *pidp)
{
    struct quotaname *qnp = auxarg;
    struct fileusage *fup;
    union dinode *dp;
    int cg, i, mode, errs = 0, status;
    ino_t ino, inosused;
    pid_t pid;
    char *cp;

    switch (pid = fork()) {
    case -1:    /* error */
        warn("fork");
        return 1;
    case 0:     /* child */
        if ((fi = open(fsname, O_RDONLY, 0)) < 0)
            err(1, "%s", fsname);
        sync();
        dev_bsize = 1;
        for (i = 0; sblock_try[i] != -1; i++) {
            bread(sblock_try[i], (char *)&sblock, (long)SBLOCKSIZE);
            if ((sblock.fs_magic == FS_UFS1_MAGIC ||
                (sblock.fs_magic == FS_UFS2_MAGIC &&
                sblock.fs_sblockloc == sblock_try[i])) &&
                sblock.fs_bsize <= MAXBSIZE &&
                sblock.fs_bsize >= sizeof(struct fs))
                break;
        }
        if (sblock_try[i] == -1) {
            warn("Cannot find file system superblock");
            return (1);
        }
        dev_bsize = sblock.fs_fsize / fsbtodb(&sblock, 1);
        maxino = sblock.fs_ncg * sblock.fs_ipg;
        for (cg = 0; cg < sblock.fs_ncg; cg++) {
            ino = cg * sblock.fs_ipg;
            setinodebuf(ino);
            bread(fsbtodb(&sblock, cgtod(&sblock, cg)),
                (char *)(&cgblk), sblock.fs_cgsize);
            if (sblock.fs_magic == FS_UFS2_MAGIC)
                inosused = cgblk.cg_initediblk;
            else
                inosused = sblock.fs_ipg;
            /*
             * If we are using soft updates, then we can trust the
             * cylinder group inode allocation maps to tell us which
             * inodes are allocated. We will scan the used inode map
             * to find the inodes that are really in use, and then
             * read only those inodes in from disk.
             */
            if (sblock.fs_flags & FS_DOSOFTDEP) {
                if (!cg_chkmagic(&cgblk))
                    errx(1, "CG %d: BAD MAGIC NUMBER\n", cg);
                cp = &cg_inosused(&cgblk)[(inosused - 1) / CHAR_BIT];
                for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) {
                    if (*cp == 0)
                        continue;
                    for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) {
                        if (*cp & i)
                            break;
                        inosused--;
                    }
                    break;
                }
                if (inosused <= 0)
                    continue;
            }
            for (i = 0; i < inosused; i++, ino++) {
                if ((dp = getnextinode(ino)) == NULL ||
                    ino < ROOTINO ||
                    (mode = DIP(dp, di_mode) & IFMT) == 0)
                    continue;
                if (qnp->flags & HASGRP) {
                    fup = addid(DIP(dp, di_gid), GRPQUOTA, NULL);
                    fup->fu_curinodes++;
                    if (mode == IFREG || mode == IFDIR ||
                        mode == IFLNK)
                        fup->fu_curblocks += DIP(dp, di_blocks);
                }
                if (qnp->flags & HASUSR) {
                    fup = addid(DIP(dp, di_uid), USRQUOTA, NULL);
                    fup->fu_curinodes++;
                    if (mode == IFREG || mode == IFDIR ||
                        mode == IFLNK)
                        fup->fu_curblocks += DIP(dp, di_blocks);
                }
            }
        }
        freeinodebuf();
        if (flags & (CHECK_DEBUG|CHECK_VERBOSE)) {
            (void)printf("*** Checking ");
            if (qnp->flags & HASUSR) {
                (void)printf("%s", qfextension[USRQUOTA]);
                if (qnp->flags & HASGRP)
                    (void)printf(" and ");
            }
            if (qnp->flags & HASGRP)
                (void)printf("%s", qfextension[GRPQUOTA]);
            (void)printf(" quotas for %s (%s), %swait\n",
                fsname, mntpt, pidp ? "no" : "");
        }
        if (qnp->flags & HASUSR)
            errs += update(mntpt, qnp->usrqfname, USRQUOTA);
        if (qnp->flags & HASGRP)
            errs += update(mntpt, qnp->grpqfname, GRPQUOTA);
        close(fi);
        exit(errs);
        break;
    default:    /* parent */
        if (pidp != NULL) {
            *pidp = pid;
            return 0;
        }
        if (waitpid(pid, &status, 0) < 0) {
            warn("waitpid");
            return 1;
        }
        if (WIFEXITED(status)) {
            if (WEXITSTATUS(status) != 0)
                return WEXITSTATUS(status);
        } else if (WIFSIGNALED(status)) {
            warnx("%s: %s", fsname, strsignal(WTERMSIG(status)));
            return 1;
        }
        break;
    }
    return (0);
}
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
    struct cg *cgp;
    struct buf *bp;
    int32_t fragno, cgbno;
    int i, error, cg, blk, frags, bbase;
    struct fs *fs = ip->i_fs;
    const int needswap = UFS_FSNEEDSWAP(fs);

    if (size > fs->fs_bsize || ffs_fragoff(fs, size) != 0 ||
        ffs_fragnum(fs, bno) + ffs_numfrags(fs, size) > fs->fs_frag) {
        errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
            (long long)bno, fs->fs_bsize, size);
    }
    cg = dtog(fs, bno);
    if (bno >= fs->fs_size) {
        warnx("bad block %lld, ino %llu", (long long)bno,
            (unsigned long long)ip->i_number);
        return;
    }
    error = bread(ip->i_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
        (int)fs->fs_cgsize, 0, &bp);
    if (error) {
        brelse(bp, 0);
        return;
    }
    cgp = (struct cg *)bp->b_data;
    if (!cg_chkmagic(cgp, needswap)) {
        brelse(bp, 0);
        return;
    }
    cgbno = dtogd(fs, bno);
    if (size == fs->fs_bsize) {
        fragno = ffs_fragstoblks(fs, cgbno);
        if (!ffs_isfreeblock(fs, cg_blksfree(cgp, needswap), fragno)) {
            errx(1, "blkfree: freeing free block %lld",
                (long long)bno);
        }
        ffs_setblock(fs, cg_blksfree(cgp, needswap), fragno);
        ffs_clusteracct(fs, cgp, fragno, 1);
        ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
        fs->fs_cstotal.cs_nbfree++;
        fs->fs_cs(fs, cg).cs_nbfree++;
    } else {
        bbase = cgbno - ffs_fragnum(fs, cgbno);
        /*
         * decrement the counts associated with the old frags
         */
        blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
        ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
        /*
         * deallocate the fragment
         */
        frags = ffs_numfrags(fs, size);
        for (i = 0; i < frags; i++) {
            if (isset(cg_blksfree(cgp, needswap), cgbno + i)) {
                errx(1, "blkfree: freeing free frag: block %lld",
                    (long long)(cgbno + i));
            }
            setbit(cg_blksfree(cgp, needswap), cgbno + i);
        }
        ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
        fs->fs_cstotal.cs_nffree += i;
        fs->fs_cs(fs, cg).cs_nffree += i;
        /*
         * add back in counts associated with the new frags
         */
        blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
        ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
        /*
         * if a complete block has been reassembled, account for it
         */
        fragno = ffs_fragstoblks(fs, bbase);
        if (ffs_isblock(fs, cg_blksfree(cgp, needswap), fragno)) {
            ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
            fs->fs_cstotal.cs_nffree -= fs->fs_frag;
            fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
            ffs_clusteracct(fs, cgp, fragno, 1);
            ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
            fs->fs_cstotal.cs_nbfree++;
            fs->fs_cs(fs, cg).cs_nbfree++;
        }
    }
    fs->fs_fmod = 1;
    bdwrite(bp);
}
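/*
 * Editor's aside: a hedged sketch of the frsum bookkeeping pattern in
 * ffs_blkfree() above: subtract a block's free-run histogram before
 * flipping bits in the free map, then add the new runs back, so that
 * cg_frsum[] stays consistent.  toy_fragacct() is a simplified
 * stand-in for ffs_fragacct() operating on one 8-fragment mask
 * (bit set = fragment free); all names and values here are made up.
 */
#include <stdio.h>

#define FRAG 8                      /* fragments per block (example) */

static void
toy_fragacct(unsigned blk, int frsum[FRAG + 1], int cnt)
{
    int bit, run = 0;

    for (bit = 0; bit < FRAG; bit++) {
        if (blk & (1u << bit)) {
            run++;                  /* extend current free run */
        } else if (run > 0) {
            frsum[run] += cnt;      /* close out a run of "run" frags */
            run = 0;
        }
    }
    if (run > 0)
        frsum[run] += cnt;
}

int
main(void)
{
    int frsum[FRAG + 1] = { 0 };
    unsigned blk = 0x0F;            /* frags 0-3 free: one run of 4 */

    toy_fragacct(blk, frsum, 1);    /* account the initial shape */
    toy_fragacct(blk, frsum, -1);   /* retract before an update */
    blk |= 0x30;                    /* free frags 4-5: run grows to 6 */
    toy_fragacct(blk, frsum, 1);    /* account the new shape */
    printf("frsum[4]=%d frsum[6]=%d\n", frsum[4], frsum[6]);
    return (0);
}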
/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
    struct cg *cgp;
    struct buf *bp;
    daddr_t bno, blkno;
    int error, frags, allocsiz, i;
    struct fs *fs = ip->i_fs;
    const int needswap = UFS_FSNEEDSWAP(fs);

    if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
        return (0);
    error = bread(ip->i_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
        (int)fs->fs_cgsize, 0, &bp);
    if (error) {
        return (0);
    }
    cgp = (struct cg *)bp->b_data;
    if (!cg_chkmagic(cgp, needswap) ||
        (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
        brelse(bp, 0);
        return (0);
    }
    if (size == fs->fs_bsize) {
        bno = ffs_alloccgblk(ip, bp, bpref);
        bwrite(bp);
        return (bno);
    }
    /*
     * check to see if any fragments are already available
     * allocsiz is the size which will be allocated, hacking
     * it down to a smaller size if necessary
     */
    frags = ffs_numfrags(fs, size);
    for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
        if (cgp->cg_frsum[allocsiz] != 0)
            break;
    if (allocsiz == fs->fs_frag) {
        /*
         * no fragments were available, so a block will be
         * allocated, and hacked up
         */
        if (cgp->cg_cs.cs_nbfree == 0) {
            brelse(bp, 0);
            return (0);
        }
        bno = ffs_alloccgblk(ip, bp, bpref);
        bpref = dtogd(fs, bno);
        for (i = frags; i < fs->fs_frag; i++)
            setbit(cg_blksfree(cgp, needswap), bpref + i);
        i = fs->fs_frag - frags;
        ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
        fs->fs_cstotal.cs_nffree += i;
        fs->fs_cs(fs, cg).cs_nffree += i;
        fs->fs_fmod = 1;
        ufs_add32(cgp->cg_frsum[i], 1, needswap);
        bdwrite(bp);
        return (bno);
    }
    bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
    for (i = 0; i < frags; i++)
        clrbit(cg_blksfree(cgp, needswap), bno + i);
    ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
    fs->fs_cstotal.cs_nffree -= frags;
    fs->fs_cs(fs, cg).cs_nffree -= frags;
    fs->fs_fmod = 1;
    ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
    if (frags != allocsiz)
        ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
    blkno = cg * fs->fs_fpg + bno;
    bdwrite(bp);
    return blkno;
}
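/*
 * Editor's aside: illustrative sketch (hypothetical names and values)
 * of the cg_frsum scan in ffs_alloccg() above.  frsum[n] counts free
 * runs of exactly n fragments, so the allocator takes the smallest run
 * size that satisfies the request and falls back to carving up a full
 * block when no run is big enough.
 */
#include <stdio.h>

#define FS_FRAG 8                   /* fragments per block (example) */

static int
pick_allocsiz(const int frsum[FS_FRAG], int frags)
{
    int allocsiz;

    for (allocsiz = frags; allocsiz < FS_FRAG; allocsiz++)
        if (frsum[allocsiz] != 0)
            return (allocsiz);
    return (FS_FRAG);               /* no run fits: break up a block */
}

int
main(void)
{
    int frsum[FS_FRAG] = { 0, 4, 0, 0, 0, 2, 0, 0 };

    printf("%d\n", pick_allocsiz(frsum, 2));    /* prints 5 */
    printf("%d\n", pick_allocsiz(frsum, 6));    /* prints 8 */
    return (0);
}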
/*
 * Scan the specified file system to check quota(s) present on it.
 */
int
chkquota(char *specname, struct quotafile *qfu, struct quotafile *qfg)
{
    struct fileusage *fup;
    union dinode *dp;
    int cg, i, mode, errs = 0;
    ino_t ino, inosused, userino = 0, groupino = 0;
    dev_t dev, userdev = 0, groupdev = 0;
    struct stat sb;
    const char *mntpt;
    char *cp;

    if (qfu != NULL)
        mntpt = quota_fsname(qfu);
    else if (qfg != NULL)
        mntpt = quota_fsname(qfg);
    else
        errx(1, "null quotafile information passed to chkquota()\n");
    if (cflag) {
        if (vflag && qfu != NULL)
            printf("%s: convert user quota to %d bits\n", mntpt, cflag);
        if (qfu != NULL && quota_convert(qfu, cflag) < 0) {
            if (errno == EBADF)
                errx(1, "%s: cannot convert an active quota file",
                    mntpt);
            err(1, "user quota conversion to size %d failed", cflag);
        }
        if (vflag && qfg != NULL)
            printf("%s: convert group quota to %d bits\n", mntpt, cflag);
        if (qfg != NULL && quota_convert(qfg, cflag) < 0) {
            if (errno == EBADF)
                errx(1, "%s: cannot convert an active quota file",
                    mntpt);
            err(1, "group quota conversion to size %d failed", cflag);
        }
    }
    if ((fi = open(specname, O_RDONLY, 0)) < 0) {
        warn("%s", specname);
        return (1);
    }
    if ((stat(mntpt, &sb)) < 0) {
        warn("%s", mntpt);
        return (1);
    }
    dev = sb.st_dev;
    if (vflag) {
        (void)printf("*** Checking ");
        if (qfu)
            (void)printf("user%s", qfg ? " and " : "");
        if (qfg)
            (void)printf("group");
        (void)printf(" quotas for %s (%s)\n", specname, mntpt);
    }
    if (qfu) {
        if (stat(quota_qfname(qfu), &sb) == 0) {
            userino = sb.st_ino;
            userdev = sb.st_dev;
        }
    }
    if (qfg) {
        if (stat(quota_qfname(qfg), &sb) == 0) {
            groupino = sb.st_ino;
            groupdev = sb.st_dev;
        }
    }
    sync();
    dev_bsize = 1;
    for (i = 0; sblock_try[i] != -1; i++) {
        bread(sblock_try[i], (char *)&sblock, (long)SBLOCKSIZE);
        if ((sblock.fs_magic == FS_UFS1_MAGIC ||
            (sblock.fs_magic == FS_UFS2_MAGIC &&
            sblock.fs_sblockloc == sblock_try[i])) &&
            sblock.fs_bsize <= MAXBSIZE &&
            sblock.fs_bsize >= sizeof(struct fs))
            break;
    }
    if (sblock_try[i] == -1) {
        warn("Cannot find file system superblock");
        return (1);
    }
    dev_bsize = sblock.fs_fsize / fsbtodb(&sblock, 1);
    maxino = sblock.fs_ncg * sblock.fs_ipg;
    for (cg = 0; cg < sblock.fs_ncg; cg++) {
        ino = cg * sblock.fs_ipg;
        setinodebuf(ino);
        bread(fsbtodb(&sblock, cgtod(&sblock, cg)), (char *)(&cgblk),
            sblock.fs_cgsize);
        if (sblock.fs_magic == FS_UFS2_MAGIC)
            inosused = cgblk.cg_initediblk;
        else
            inosused = sblock.fs_ipg;
        /*
         * If we are using soft updates, then we can trust the
         * cylinder group inode allocation maps to tell us which
         * inodes are allocated. We will scan the used inode map
         * to find the inodes that are really in use, and then
         * read only those inodes in from disk.
         */
        if (sblock.fs_flags & FS_DOSOFTDEP) {
            if (!cg_chkmagic(&cgblk))
                errx(1, "CG %d: BAD MAGIC NUMBER\n", cg);
            cp = &cg_inosused(&cgblk)[(inosused - 1) / CHAR_BIT];
            for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) {
                if (*cp == 0)
                    continue;
                for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) {
                    if (*cp & i)
                        break;
                    inosused--;
                }
                break;
            }
            if (inosused <= 0)
                continue;
        }
        for (i = 0; i < inosused; i++, ino++) {
            if ((dp = getnextinode(ino)) == NULL ||
                ino < ROOTINO ||
                (mode = DIP(dp, di_mode) & IFMT) == 0)
                continue;
            /*
             * XXX: Do not account for UIDs or GIDs that appear
             * to be negative to prevent generating 100GB+
             * quota files.
             */
            if ((int)DIP(dp, di_uid) < 0 ||
                (int)DIP(dp, di_gid) < 0) {
                if (vflag) {
                    if (aflag)
                        (void)printf("%s: ", mntpt);
                    (void)printf(
                        "out of range UID/GID (%u/%u) ino=%ju\n",
                        DIP(dp, di_uid), DIP(dp, di_gid),
                        (uintmax_t)ino);
                }
                continue;
            }
            /*
             * Do not account for file system snapshot files
             * or the actual quota data files to be consistent
             * with how they are handled inside the kernel.
             */
#ifdef SF_SNAPSHOT
            if (DIP(dp, di_flags) & SF_SNAPSHOT)
                continue;
#endif
            if ((ino == userino && dev == userdev) ||
                (ino == groupino && dev == groupdev))
                continue;
            if (qfg) {
                fup = addid((u_long)DIP(dp, di_gid), GRPQUOTA,
                    (char *)0, mntpt);
                fup->fu_curinodes++;
                if (mode == IFREG || mode == IFDIR ||
                    mode == IFLNK)
                    fup->fu_curblocks += DIP(dp, di_blocks);
            }
            if (qfu) {
                fup = addid((u_long)DIP(dp, di_uid), USRQUOTA,
                    (char *)0, mntpt);
                fup->fu_curinodes++;
                if (mode == IFREG || mode == IFDIR ||
                    mode == IFLNK)
                    fup->fu_curblocks += DIP(dp, di_blocks);
            }
        }
    }
    freeinodebuf();
    if (qfu)
        errs += update(mntpt, qfu, USRQUOTA);
    if (qfg)
        errs += update(mntpt, qfg, GRPQUOTA);
    close(fi);
    (void)fflush(stdout);
    return (errs);
}
/*
 * Find a suitable location for the journal in the filesystem.
 *
 * Our strategy here is to look for a contiguous block of free space
 * at least "logfile" MB in size (plus room for any indirect blocks).
 * We start at the middle of the filesystem and check each cylinder
 * group working outwards.  If "logfile" MB is not available as a
 * single contiguous chunk, then return the address and size of the
 * largest chunk found.
 *
 * XXX
 * At what stage does the search fail?  Is it reasonable to fail if
 * the largest space we could find is less than a quarter of the
 * requested space?  If the search fails entirely, return a block
 * address of "0" to indicate this.
 */
void
wapbl_find_log_start(struct mount *mp, struct vnode *vp, off_t logsize,
    daddr_t *addr, daddr_t *indir_addr, size_t *size)
{
    struct ufsmount *ump = VFSTOUFS(mp);
    struct fs *fs = ump->um_fs;
    struct vnode *devvp = ump->um_devvp;
    struct cg *cgp;
    struct buf *bp;
    uint8_t *blksfree;
    daddr_t blkno, best_addr, start_addr;
    daddr_t desired_blks, min_desired_blks;
    daddr_t freeblks, best_blks;
    int bpcg, cg, error, fixedsize, indir_blks, n, s;
#ifdef FFS_EI
    const int needswap = UFS_FSNEEDSWAP(fs);
#endif

    if (logsize == 0) {
        fixedsize = 0;  /* We can adjust the size if tight */
        logsize = lfragtosize(fs, fs->fs_dsize) /
            UFS_WAPBL_JOURNAL_SCALE;
        DPRINTF("suggested log size = %lld\n", logsize);
        logsize = max(logsize, UFS_WAPBL_MIN_JOURNAL_SIZE);
        logsize = min(logsize, UFS_WAPBL_MAX_JOURNAL_SIZE);
        DPRINTF("adjusted log size = %lld\n", logsize);
    } else {
        fixedsize = 1;
        DPRINTF("fixed log size = %lld\n", logsize);
    }

    desired_blks = logsize / fs->fs_bsize;
    DPRINTF("desired blocks = %lld\n", desired_blks);

    /* add in number of indirect blocks needed */
    indir_blks = 0;
    if (desired_blks >= NDADDR) {
        struct indir indirs[NIADDR + 2];
        int num;

        error = ufs_getlbns(vp, desired_blks, indirs, &num);
        if (error) {
            printf("%s: ufs_getlbns failed, error %d!\n",
                __func__, error);
            goto bad;
        }

        switch (num) {
        case 2:
            indir_blks = 1;             /* 1st level indirect */
            break;
        case 3:
            indir_blks = 1 +            /* 1st level indirect */
                1 +                     /* 2nd level indirect */
                indirs[1].in_off + 1;   /* extra 1st level indirect */
            break;
        default:
            printf("%s: unexpected numlevels %d from ufs_getlbns\n",
                __func__, num);
            *size = 0;
            goto bad;
        }
        desired_blks += indir_blks;
    }
    DPRINTF("desired blocks = %lld (including indirect)\n",
        desired_blks);

    /*
     * If a specific size wasn't requested, allow for a smaller log
     * if we're really tight for space...
     */
    min_desired_blks = desired_blks;
    if (!fixedsize)
        min_desired_blks = desired_blks / 4;

    /* Look at number of blocks per CG.  If it's too small, bail early. */
    bpcg = fragstoblks(fs, fs->fs_fpg);
    if (min_desired_blks > bpcg) {
        printf("ffs_wapbl: cylinder group size of %lld MB "
            " is not big enough for journal\n",
            lblktosize(fs, bpcg) / (1024 * 1024));
        goto bad;
    }

    /*
     * Start with the middle cylinder group, and search outwards in
     * both directions until we either find the requested log size
     * or reach the start/end of the file system.  If we reach the
     * start/end without finding enough space for the full requested
     * log size, use the largest extent found if it is large enough
     * to satisfy our minimum size.
     *
     * XXX
     * Can we just use the cluster contigsum stuff (esp on UFS2)
     * here to simplify this search code?
     */
    best_addr = 0;
    best_blks = 0;
    for (cg = fs->fs_ncg / 2, s = 0, n = 1;
        best_blks < desired_blks && cg >= 0 && cg < fs->fs_ncg;
        s++, n = -n, cg += n * s) {
        DPRINTF("check cg %d of %d\n", cg, fs->fs_ncg);
        error = bread(devvp, fsbtodb(fs, cgtod(fs, cg)),
            fs->fs_cgsize, &bp);
        if (error) {
            continue;
        }
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic(cgp)) {
            brelse(bp);
            continue;
        }

        blksfree = cg_blksfree(cgp);

        for (blkno = 0; blkno < bpcg;) {
            /* look for next free block */
            /* XXX use scanc() and fragtbl[] here? */
            for (; blkno < bpcg - min_desired_blks; blkno++)
                if (ffs_isblock(fs, blksfree, blkno))
                    break;

            /* past end of search space in this CG? */
            if (blkno >= bpcg - min_desired_blks)
                break;

            /* count how many free blocks in this extent */
            start_addr = blkno;
            for (freeblks = 0; blkno < bpcg; blkno++, freeblks++)
                if (!ffs_isblock(fs, blksfree, blkno))
                    break;

            if (freeblks > best_blks) {
                best_blks = freeblks;
                best_addr = blkstofrags(fs, start_addr) +
                    cgbase(fs, cg);

                if (freeblks >= desired_blks) {
                    DPRINTF("found len %lld"
                        " at offset %lld in cg\n",
                        freeblks, start_addr);
                    break;
                }
            }
        }
        brelse(bp);
    }
    DPRINTF("best found len = %lld, wanted %lld at addr %lld\n",
        best_blks, desired_blks, best_addr);

    if (best_blks < min_desired_blks) {
        *addr = 0;
        *indir_addr = 0;
    } else {
        /* put indirect blocks at start, and data blocks after */
        *addr = best_addr + blkstofrags(fs, indir_blks);
        *indir_addr = best_addr;
    }
    *size = min(desired_blks, best_blks) - indir_blks;
    return;

bad:
    *addr = 0;
    *indir_addr = 0;
    *size = 0;
    return;
}
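/*
 * Editor's aside: the for-loop header above encodes a center-out walk
 * of the cylinder groups.  This toy program (names are illustrative;
 * only the iteration pattern is taken from wapbl_find_log_start())
 * prints the visit order: middle, middle-1, middle+1, middle-2, ...
 */
#include <stdio.h>

int
main(void)
{
    int ncg = 7;                    /* cylinder group count (example) */
    int cg, s, n;

    for (cg = ncg / 2, s = 0, n = 1;
        cg >= 0 && cg < ncg;
        s++, n = -n, cg += n * s)
        printf("visit cg %d\n", cg);    /* 3 2 4 1 5 0 6 for ncg = 7 */
    return (0);
}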
pass5()
{
    int c, blk, frags, basesize, sumsize, mapsize, savednrpos;
    register struct fs *fs = &sblock;
    register struct cg *cg = &cgrp;
    daddr_t dbase, dmax;
    register daddr_t d;
    register long i, j;
    struct csum *cs;
    time_t now;
    struct csum cstotal;
    struct inodesc idesc;
    char buf[MAXBSIZE];
    register struct cg *newcg = (struct cg *)buf;
    struct ocg *ocg = (struct ocg *)buf;

    bzero((char *)newcg, fs->fs_cgsize);
    newcg->cg_niblk = fs->fs_ipg;
    switch (fs->fs_postblformat) {

    case FS_42POSTBLFMT:
        basesize = (char *)(&ocg->cg_btot[0]) - (char *)(&ocg->cg_link);
        sumsize = &ocg->cg_iused[0] - (char *)(&ocg->cg_btot[0]);
        mapsize = &ocg->cg_free[howmany(fs->fs_fpg, NBBY)] -
            (u_char *)&ocg->cg_iused[0];
        ocg->cg_magic = CG_MAGIC;
        savednrpos = fs->fs_nrpos;
        fs->fs_nrpos = 8;
        break;

    case FS_DYNAMICPOSTBLFMT:
        newcg->cg_btotoff = &newcg->cg_space[0] -
            (u_char *)(&newcg->cg_link);
        newcg->cg_boff = newcg->cg_btotoff + fs->fs_cpg * sizeof(long);
        newcg->cg_iusedoff = newcg->cg_boff +
            fs->fs_cpg * fs->fs_nrpos * sizeof(short);
        newcg->cg_freeoff = newcg->cg_iusedoff +
            howmany(fs->fs_ipg, NBBY);
        newcg->cg_nextfreeoff = newcg->cg_freeoff +
            howmany(fs->fs_cpg * fs->fs_spc / NSPF(fs), NBBY);
        newcg->cg_magic = CG_MAGIC;
        basesize = &newcg->cg_space[0] - (u_char *)(&newcg->cg_link);
        sumsize = newcg->cg_iusedoff - newcg->cg_btotoff;
        mapsize = newcg->cg_nextfreeoff - newcg->cg_iusedoff;
        break;

    default:
        errexit("UNKNOWN ROTATIONAL TABLE FORMAT %d\n",
            fs->fs_postblformat);
    }
    bzero((char *)&idesc, sizeof(struct inodesc));
    idesc.id_type = ADDR;
    bzero((char *)&cstotal, sizeof(struct csum));
    (void)time(&now);
    for (i = fs->fs_size; i < fragroundup(fs, fs->fs_size); i++)
        setbmap(i);
    for (c = 0; c < fs->fs_ncg; c++) {
        getblk(&cgblk, cgtod(fs, c), fs->fs_cgsize);
        if (!cg_chkmagic(cg))
            pfatal("CG %d: BAD MAGIC NUMBER\n", c);
        dbase = cgbase(fs, c);
        dmax = dbase + fs->fs_fpg;
        if (dmax > fs->fs_size)
            dmax = fs->fs_size;
        if (now > cg->cg_time)
            newcg->cg_time = cg->cg_time;
        else
            newcg->cg_time = now;
        newcg->cg_cgx = c;
        if (c == fs->fs_ncg - 1)
            newcg->cg_ncyl = fs->fs_ncyl % fs->fs_cpg;
        else
            newcg->cg_ncyl = fs->fs_cpg;
        newcg->cg_ndblk = dmax - dbase;
        newcg->cg_cs.cs_ndir = 0;
        newcg->cg_cs.cs_nffree = 0;
        newcg->cg_cs.cs_nbfree = 0;
        newcg->cg_cs.cs_nifree = fs->fs_ipg;
        if (cg->cg_rotor < newcg->cg_ndblk)
            newcg->cg_rotor = cg->cg_rotor;
        else
            newcg->cg_rotor = 0;
        if (cg->cg_frotor < newcg->cg_ndblk)
            newcg->cg_frotor = cg->cg_frotor;
        else
            newcg->cg_frotor = 0;
        if (cg->cg_irotor < newcg->cg_niblk)
            newcg->cg_irotor = cg->cg_irotor;
        else
            newcg->cg_irotor = 0;
        bzero((char *)&newcg->cg_frsum[0], sizeof newcg->cg_frsum);
        bzero((char *)&cg_blktot(newcg)[0], sumsize + mapsize);
        if (fs->fs_postblformat == FS_42POSTBLFMT)
            ocg->cg_magic = CG_MAGIC;
        j = fs->fs_ipg * c;
        for (i = 0; i < fs->fs_ipg; j++, i++) {
            switch (statemap[j]) {

            case USTATE:
                break;

            case DSTATE:
            case DCLEAR:
            case DFOUND:
                newcg->cg_cs.cs_ndir++;
                /* fall through */

            case FSTATE:
            case FCLEAR:
                newcg->cg_cs.cs_nifree--;
                setbit(cg_inosused(newcg), i);
                break;

            default:
                if (j < ROOTINO)
                    break;
                errexit("BAD STATE %d FOR INODE I=%d",
                    statemap[j], j);
            }
        }
        if (c == 0)
            for (i = 0; i < ROOTINO; i++) {
                setbit(cg_inosused(newcg), i);
                newcg->cg_cs.cs_nifree--;
            }
        for (i = 0, d = dbase;
            d < dmax;
            d += fs->fs_frag, i += fs->fs_frag) {
            frags = 0;
            for (j = 0; j < fs->fs_frag; j++) {
                if (getbmap(d + j))
                    continue;
                setbit(cg_blksfree(newcg), i + j);
                frags++;
            }
            if (frags == fs->fs_frag) {
                newcg->cg_cs.cs_nbfree++;
                j = cbtocylno(fs, i);
                cg_blktot(newcg)[j]++;
                cg_blks(fs, newcg, j)[cbtorpos(fs, i)]++;
            } else if (frags > 0) {
                newcg->cg_cs.cs_nffree += frags;
                blk = blkmap(fs, cg_blksfree(newcg), i);
                fragacct(fs, blk, newcg->cg_frsum, 1);
            }
        }
        cstotal.cs_nffree += newcg->cg_cs.cs_nffree;
        cstotal.cs_nbfree += newcg->cg_cs.cs_nbfree;
        cstotal.cs_nifree += newcg->cg_cs.cs_nifree;
        cstotal.cs_ndir += newcg->cg_cs.cs_ndir;
        cs = &fs->fs_cs(fs, c);
        if (bcmp((char *)&newcg->cg_cs, (char *)cs, sizeof *cs) != 0 &&
            dofix(&idesc, "FREE BLK COUNT(S) WRONG IN SUPERBLK")) {
            bcopy((char *)&newcg->cg_cs, (char *)cs, sizeof *cs);
            sbdirty();
        }
        if (cvtflag) {
            bcopy((char *)newcg, (char *)cg, fs->fs_cgsize);
            cgdirty();
            continue;
        }
        if (bcmp(cg_inosused(newcg), cg_inosused(cg), mapsize) != 0 &&
            dofix(&idesc, "BLK(S) MISSING IN BIT MAPS")) {
            bcopy(cg_inosused(newcg), cg_inosused(cg), mapsize);
            cgdirty();
        }
        if ((bcmp((char *)newcg, (char *)cg, basesize) != 0 ||
            bcmp((char *)&cg_blktot(newcg)[0],
                (char *)&cg_blktot(cg)[0], sumsize) != 0) &&
            dofix(&idesc, "SUMMARY INFORMATION BAD")) {
            bcopy((char *)newcg, (char *)cg, basesize);
            bcopy((char *)&cg_blktot(newcg)[0],
                (char *)&cg_blktot(cg)[0], sumsize);
            cgdirty();
        }
    }
    if (fs->fs_postblformat == FS_42POSTBLFMT)
        fs->fs_nrpos = savednrpos;
    if (bcmp((char *)&cstotal, (char *)&fs->fs_cstotal, sizeof *cs) != 0 &&
        dofix(&idesc, "FREE BLK COUNT(S) WRONG IN SUPERBLK")) {
        bcopy((char *)&cstotal, (char *)&fs->fs_cstotal, sizeof *cs);
        fs->fs_ronly = 0;
        fs->fs_fmod = 0;
        sbdirty();
    }
}
/*
 * Dump pass 1.
 *
 * Walk the inode list for a file system to find all allocated inodes
 * that have been modified since the previous dump time. Also, find all
 * the directories in the file system.
 */
int
mapfiles(ino_t maxino, long *tapesize)
{
    int i, cg, mode, inosused;
    int anydirskipped = 0;
    union dinode *dp;
    struct cg *cgp;
    ino_t ino;
    u_char *cp;

    if ((cgp = malloc(sblock->fs_cgsize)) == NULL)
        quit("mapfiles: cannot allocate memory.\n");
    for (cg = 0; cg < sblock->fs_ncg; cg++) {
        ino = cg * sblock->fs_ipg;
        bread(fsbtodb(sblock, cgtod(sblock, cg)), (char *)cgp,
            sblock->fs_cgsize);
        if (sblock->fs_magic == FS_UFS2_MAGIC)
            inosused = cgp->cg_initediblk;
        else
            inosused = sblock->fs_ipg;
        /*
         * If we are using soft updates, then we can trust the
         * cylinder group inode allocation maps to tell us which
         * inodes are allocated. We will scan the used inode map
         * to find the inodes that are really in use, and then
         * read only those inodes in from disk.
         */
        if (sblock->fs_flags & FS_DOSOFTDEP) {
            if (!cg_chkmagic(cgp))
                quit("mapfiles: cg %d: bad magic number\n", cg);
            cp = &cg_inosused(cgp)[(inosused - 1) / CHAR_BIT];
            for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) {
                if (*cp == 0)
                    continue;
                for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) {
                    if (*cp & i)
                        break;
                    inosused--;
                }
                break;
            }
            if (inosused <= 0)
                continue;
        }
        for (i = 0; i < inosused; i++, ino++) {
            if (ino < ROOTINO ||
                (dp = getino(ino, &mode)) == NULL ||
                (mode & IFMT) == 0)
                continue;
            if (ino >= maxino) {
                msg("Skipping inode %d >= maxino %d\n", ino, maxino);
                continue;
            }
            /*
             * Everything must go in usedinomap so that a check
             * for "in dumpdirmap but not in usedinomap" to detect
             * dirs with nodump set has a chance of succeeding
             * (this is used in mapdirs()).
             */
            SETINO(ino, usedinomap);
            if (mode == IFDIR)
                SETINO(ino, dumpdirmap);
            if (WANTTODUMP(dp)) {
                SETINO(ino, dumpinomap);
                if (mode != IFREG && mode != IFDIR && mode != IFLNK)
                    *tapesize += 1;
                else
                    *tapesize += blockest(dp);
                continue;
            }
            if (mode == IFDIR) {
                if (!nonodump && (DIP(dp, di_flags) & UF_NODUMP))
                    CLRINO(ino, usedinomap);
                anydirskipped = 1;
            }
        }
    }
    /*
     * Restore gets very upset if the root is not dumped,
     * so ensure that it always is dumped.
     */
    SETINO(ROOTINO, dumpinomap);
    return (anydirskipped);
}
void
pass5(void)
{
    int c, blk, frags, basesize, sumsize, mapsize, savednrpos = 0;
    int inomapsize, blkmapsize;
    struct fs *fs = &sblock;
    struct cg *cg = &cgrp;
    ufs_daddr_t dbase, dmax;
    ufs_daddr_t d;
    long i, j, k;
    struct csum *cs;
    struct csum cstotal;
    struct inodesc idesc[3];
    char buf[MAXBSIZE];
    struct cg *newcg = (struct cg *)buf;
    struct ocg *ocg = (struct ocg *)buf;

    inoinfo(WINO)->ino_state = USTATE;
    memset(newcg, 0, (size_t)fs->fs_cgsize);
    newcg->cg_niblk = fs->fs_ipg;
    if (cvtlevel >= 3) {
        if (fs->fs_maxcontig < 2 && fs->fs_contigsumsize > 0) {
            if (preen)
                pwarn("DELETING CLUSTERING MAPS\n");
            if (preen || reply("DELETE CLUSTERING MAPS")) {
                fs->fs_contigsumsize = 0;
                doinglevel1 = 1;
                sbdirty();
            }
        }
        if (fs->fs_maxcontig > 1) {
            char *doit = NULL;

            if (fs->fs_contigsumsize < 1) {
                doit = "CREAT";
            } else if (fs->fs_contigsumsize < fs->fs_maxcontig &&
                fs->fs_contigsumsize < FS_MAXCONTIG) {
                doit = "EXPAND";
            }
            if (doit) {
                i = fs->fs_contigsumsize;
                fs->fs_contigsumsize =
                    MIN(fs->fs_maxcontig, FS_MAXCONTIG);
                if (CGSIZE(fs) > fs->fs_bsize) {
                    pwarn("CANNOT %s CLUSTER MAPS\n", doit);
                    fs->fs_contigsumsize = i;
                } else if (preen || reply("CREATE CLUSTER MAPS")) {
                    if (preen)
                        pwarn("%sING CLUSTER MAPS\n", doit);
                    fs->fs_cgsize = fragroundup(fs, CGSIZE(fs));
                    doinglevel1 = 1;
                    sbdirty();
                }
            }
        }
    }
    switch ((int)fs->fs_postblformat) {

    case FS_42POSTBLFMT:
        basesize = (char *)(&ocg->cg_btot[0]) -
            (char *)(&ocg->cg_firstfield);
        sumsize = &ocg->cg_iused[0] - (u_int8_t *)(&ocg->cg_btot[0]);
        mapsize = &ocg->cg_free[howmany(fs->fs_fpg, NBBY)] -
            (u_char *)&ocg->cg_iused[0];
        blkmapsize = howmany(fs->fs_fpg, NBBY);
        inomapsize = &ocg->cg_free[0] - (u_char *)&ocg->cg_iused[0];
        ocg->cg_magic = CG_MAGIC;
        savednrpos = fs->fs_nrpos;
        fs->fs_nrpos = 8;
        break;

    case FS_DYNAMICPOSTBLFMT:
        newcg->cg_btotoff = &newcg->cg_space[0] -
            (u_char *)(&newcg->cg_firstfield);
        newcg->cg_boff = newcg->cg_btotoff +
            fs->fs_cpg * sizeof(int32_t);
        newcg->cg_iusedoff = newcg->cg_boff +
            fs->fs_cpg * fs->fs_nrpos * sizeof(u_int16_t);
        newcg->cg_freeoff = newcg->cg_iusedoff +
            howmany(fs->fs_ipg, NBBY);
        inomapsize = newcg->cg_freeoff - newcg->cg_iusedoff;
        newcg->cg_nextfreeoff = newcg->cg_freeoff +
            howmany(fs->fs_cpg * fs->fs_spc / NSPF(fs), NBBY);
        blkmapsize = newcg->cg_nextfreeoff - newcg->cg_freeoff;
        if (fs->fs_contigsumsize > 0) {
            newcg->cg_clustersumoff = newcg->cg_nextfreeoff -
                sizeof(u_int32_t);
            newcg->cg_clustersumoff =
                roundup(newcg->cg_clustersumoff, sizeof(u_int32_t));
            newcg->cg_clusteroff = newcg->cg_clustersumoff +
                (fs->fs_contigsumsize + 1) * sizeof(u_int32_t);
            newcg->cg_nextfreeoff = newcg->cg_clusteroff +
                howmany(fs->fs_cpg * fs->fs_spc / NSPB(fs), NBBY);
        }
        newcg->cg_magic = CG_MAGIC;
        basesize = &newcg->cg_space[0] -
            (u_char *)(&newcg->cg_firstfield);
        sumsize = newcg->cg_iusedoff - newcg->cg_btotoff;
        mapsize = newcg->cg_nextfreeoff - newcg->cg_iusedoff;
        break;

    default:
        inomapsize = blkmapsize = sumsize = 0;  /* keep lint happy */
        errx(EEXIT, "UNKNOWN ROTATIONAL TABLE FORMAT %d",
            fs->fs_postblformat);
    }
    memset(&idesc[0], 0, sizeof idesc);
    for (i = 0; i < 3; i++) {
        idesc[i].id_type = ADDR;
        if (doinglevel2)
            idesc[i].id_fix = FIX;
    }
    memset(&cstotal, 0, sizeof(struct csum));
    j = blknum(fs, fs->fs_size + fs->fs_frag - 1);
    for (i = fs->fs_size; i < j; i++)
        setbmap(i);
    for (c = 0; c < fs->fs_ncg; c++) {
        if (got_siginfo) {
            printf("%s: phase 5: cyl group %d of %d (%d%%)\n",
                cdevname, c, sblock.fs_ncg,
                c * 100 / sblock.fs_ncg);
            got_siginfo = 0;
        }
        getblk(&cgblk, cgtod(fs, c), fs->fs_cgsize);
        if (!cg_chkmagic(cg))
            pfatal("CG %d: BAD MAGIC NUMBER\n", c);
        dbase = cgbase(fs, c);
        dmax = dbase + fs->fs_fpg;
        if (dmax > fs->fs_size)
            dmax = fs->fs_size;
        newcg->cg_time = cg->cg_time;
        newcg->cg_cgx = c;
        if (c == fs->fs_ncg - 1)
            newcg->cg_ncyl = fs->fs_ncyl % fs->fs_cpg;
        else
            newcg->cg_ncyl = fs->fs_cpg;
        newcg->cg_ndblk = dmax - dbase;
        if (fs->fs_contigsumsize > 0)
            newcg->cg_nclusterblks = newcg->cg_ndblk / fs->fs_frag;
        newcg->cg_cs.cs_ndir = 0;
        newcg->cg_cs.cs_nffree = 0;
        newcg->cg_cs.cs_nbfree = 0;
        newcg->cg_cs.cs_nifree = fs->fs_ipg;
        if ((cg->cg_rotor >= 0) && (cg->cg_rotor < newcg->cg_ndblk))
            newcg->cg_rotor = cg->cg_rotor;
        else
            newcg->cg_rotor = 0;
        if ((cg->cg_frotor >= 0) && (cg->cg_frotor < newcg->cg_ndblk))
            newcg->cg_frotor = cg->cg_frotor;
        else
            newcg->cg_frotor = 0;
        if ((cg->cg_irotor >= 0) && (cg->cg_irotor < newcg->cg_niblk))
            newcg->cg_irotor = cg->cg_irotor;
        else
            newcg->cg_irotor = 0;
        memset(&newcg->cg_frsum[0], 0, sizeof newcg->cg_frsum);
        memset(&cg_blktot(newcg)[0], 0, (size_t)(sumsize + mapsize));
        if (fs->fs_postblformat == FS_42POSTBLFMT)
            ocg->cg_magic = CG_MAGIC;
        j = fs->fs_ipg * c;
        for (i = 0; i < inostathead[c].il_numalloced; j++, i++) {
            switch (inoinfo(j)->ino_state) {

            case USTATE:
                break;

            case DSTATE:
            case DCLEAR:
            case DFOUND:
                newcg->cg_cs.cs_ndir++;
                /* fall through */

            case FSTATE:
            case FCLEAR:
                newcg->cg_cs.cs_nifree--;
                setbit(cg_inosused(newcg), i);
                break;

            default:
                if (j < ROOTINO)
                    break;
                errx(EEXIT, "BAD STATE %d FOR INODE I=%ld",
                    inoinfo(j)->ino_state, j);
            }
        }
        if (c == 0)
            for (i = 0; i < ROOTINO; i++) {
                setbit(cg_inosused(newcg), i);
                newcg->cg_cs.cs_nifree--;
            }
        for (i = 0, d = dbase;
            d < dmax;
            d += fs->fs_frag, i += fs->fs_frag) {
            frags = 0;
            for (j = 0; j < fs->fs_frag; j++) {
                if (testbmap(d + j))
                    continue;
                setbit(cg_blksfree(newcg), i + j);
                frags++;
            }
            if (frags == fs->fs_frag) {
                newcg->cg_cs.cs_nbfree++;
                j = cbtocylno(fs, i);
                cg_blktot(newcg)[j]++;
                cg_blks(fs, newcg, j)[cbtorpos(fs, i)]++;
                if (fs->fs_contigsumsize > 0)
                    setbit(cg_clustersfree(newcg),
                        i / fs->fs_frag);
            } else if (frags > 0) {
                newcg->cg_cs.cs_nffree += frags;
                blk = blkmap(fs, cg_blksfree(newcg), i);
                ffs_fragacct(fs, blk, newcg->cg_frsum, 1);
            }
        }
        if (fs->fs_contigsumsize > 0) {
            int32_t *sump = cg_clustersum(newcg);
            u_char *mapp = cg_clustersfree(newcg);
            int map = *mapp++;
            int bit = 1;
            int run = 0;

            for (i = 0; i < newcg->cg_nclusterblks; i++) {
                if ((map & bit) != 0) {
                    run++;
                } else if (run != 0) {
                    if (run > fs->fs_contigsumsize)
                        run = fs->fs_contigsumsize;
                    sump[run]++;
                    run = 0;
                }
                if ((i & (NBBY - 1)) != (NBBY - 1)) {
                    bit <<= 1;
                } else {
                    map = *mapp++;
                    bit = 1;
                }
            }
            if (run != 0) {
                if (run > fs->fs_contigsumsize)
                    run = fs->fs_contigsumsize;
                sump[run]++;
            }
        }
        cstotal.cs_nffree += newcg->cg_cs.cs_nffree;
        cstotal.cs_nbfree += newcg->cg_cs.cs_nbfree;
        cstotal.cs_nifree += newcg->cg_cs.cs_nifree;
        cstotal.cs_ndir += newcg->cg_cs.cs_ndir;
        cs = &fs->fs_cs(fs, c);
        if (memcmp(&newcg->cg_cs, cs, sizeof *cs) != 0 &&
            dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) {
            memmove(cs, &newcg->cg_cs, sizeof *cs);
            sbdirty();
        }
        if (doinglevel1) {
            memmove(cg, newcg, (size_t)fs->fs_cgsize);
            cgdirty();
            continue;
        }
        if ((memcmp(newcg, cg, basesize) != 0 ||
            memcmp(&cg_blktot(newcg)[0],
                &cg_blktot(cg)[0], sumsize) != 0) &&
            dofix(&idesc[2], "SUMMARY INFORMATION BAD")) {
            memmove(cg, newcg, (size_t)basesize);
            memmove(&cg_blktot(cg)[0],
                &cg_blktot(newcg)[0], (size_t)sumsize);
            cgdirty();
        }
        if (usedsoftdep) {
            for (i = 0; i < inomapsize; i++) {
                j = cg_inosused(newcg)[i];
                if ((cg_inosused(cg)[i] & j) == j)
                    continue;
                for (k = 0; k < NBBY; k++) {
                    if ((j & (1 << k)) == 0)
                        continue;
                    if (cg_inosused(cg)[i] & (1 << k))
                        continue;
                    pwarn("ALLOCATED INODE %d MARKED FREE\n",
                        c * fs->fs_ipg + i * NBBY + k);
                }
            }
            for (i = 0; i < blkmapsize; i++) {
                j = cg_blksfree(cg)[i];
                if ((cg_blksfree(newcg)[i] & j) == j)
                    continue;
                for (k = 0; k < NBBY; k++) {
                    if ((j & (1 << k)) == 0)
                        continue;
                    if (cg_blksfree(newcg)[i] & (1 << k))
                        continue;
                    pwarn("ALLOCATED FRAG %d MARKED FREE\n",
                        c * fs->fs_fpg + i * NBBY + k);
                }
            }
        }
        if (memcmp(cg_inosused(newcg), cg_inosused(cg), mapsize) != 0 &&
            dofix(&idesc[1], "BLK(S) MISSING IN BIT MAPS")) {
            memmove(cg_inosused(cg), cg_inosused(newcg),
                (size_t)mapsize);
            cgdirty();
        }
    }
    if (fs->fs_postblformat == FS_42POSTBLFMT)
        fs->fs_nrpos = savednrpos;
    if (memcmp(&cstotal, &fs->fs_cstotal, sizeof *cs) != 0 &&
        dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) {
        memmove(&fs->fs_cstotal, &cstotal, sizeof *cs);
        fs->fs_ronly = 0;
        fs->fs_fmod = 0;
        sbdirty();
    }
}
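/*
 * Editor's aside: standalone sketch of the cluster-summary rebuild
 * loop that pass5 runs when fs_contigsumsize > 0.  It walks a per-CG
 * "clusters free" bitmap, measures each run of consecutive free
 * blocks, and counts it in sump[], capping runs at the contigsumsize
 * just as the code above does.  The bitmap and sizes are made-up
 * example values.
 */
#include <stdio.h>

#define NBBY            8
#define CONTIGSUMSIZE   4           /* stand-in for fs_contigsumsize */

int
main(void)
{
    /* bits 0-2 and 4-9 free: one run of 3, one run of 6 */
    unsigned char mapp[] = { 0xF7, 0x03 };
    int sump[CONTIGSUMSIZE + 1] = { 0 };
    int nclusterblks = 10;          /* stand-in for cg_nclusterblks */
    unsigned char *cp = mapp;
    int i, map, bit, run;

    map = *cp++;
    bit = 1;
    run = 0;
    for (i = 0; i < nclusterblks; i++) {
        if ((map & bit) != 0) {
            run++;                  /* extend the current free run */
        } else if (run != 0) {
            if (run > CONTIGSUMSIZE)
                run = CONTIGSUMSIZE;
            sump[run]++;            /* close out the run */
            run = 0;
        }
        if ((i & (NBBY - 1)) != (NBBY - 1)) {
            bit <<= 1;
        } else {
            map = *cp++;            /* advance to the next map byte */
            bit = 1;
        }
    }
    if (run != 0) {
        if (run > CONTIGSUMSIZE)
            run = CONTIGSUMSIZE;
        sump[run]++;
    }
    for (i = 1; i <= CONTIGSUMSIZE; i++)    /* prints sump[3]=1, sump[4]=1 */
        printf("runs of length %d: %d\n", i, sump[i]);
    return (0);
}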
/*
 * Verify cylinder group's magic number and other parameters.  If the
 * test fails, offer an option to rebuild the whole cylinder group.
 */
int
check_cgmagic(int cg, struct cg *cgp)
{

    /*
     * Extended cylinder group checks.
     */
    if (cg_chkmagic(cgp) &&
        ((sblock.fs_magic == FS_UFS1_MAGIC &&
        cgp->cg_old_niblk == sblock.fs_ipg &&
        cgp->cg_ndblk <= sblock.fs_fpg &&
        cgp->cg_old_ncyl <= sblock.fs_old_cpg) ||
        (sblock.fs_magic == FS_UFS2_MAGIC &&
        cgp->cg_niblk == sblock.fs_ipg &&
        cgp->cg_ndblk <= sblock.fs_fpg &&
        cgp->cg_initediblk <= sblock.fs_ipg))) {
        return (1);
    }
    pfatal("CYLINDER GROUP %d: BAD MAGIC NUMBER", cg);
    if (!reply("REBUILD CYLINDER GROUP")) {
        printf("YOU WILL NEED TO RERUN FSCK.\n");
        rerun = 1;
        return (1);
    }
    /*
     * Zero out the cylinder group and then initialize critical fields.
     * Bit maps and summaries will be recalculated by later passes.
     */
    memset(cgp, 0, (size_t)sblock.fs_cgsize);
    cgp->cg_magic = CG_MAGIC;
    cgp->cg_cgx = cg;
    cgp->cg_niblk = sblock.fs_ipg;
    cgp->cg_initediblk = sblock.fs_ipg < 2 * INOPB(&sblock) ?
        sblock.fs_ipg : 2 * INOPB(&sblock);
    if (cgbase(&sblock, cg) + sblock.fs_fpg < sblock.fs_size)
        cgp->cg_ndblk = sblock.fs_fpg;
    else
        cgp->cg_ndblk = sblock.fs_size - cgbase(&sblock, cg);
    cgp->cg_iusedoff = &cgp->cg_space[0] - (u_char *)(&cgp->cg_firstfield);
    if (sblock.fs_magic == FS_UFS1_MAGIC) {
        cgp->cg_niblk = 0;
        cgp->cg_initediblk = 0;
        cgp->cg_old_ncyl = sblock.fs_old_cpg;
        cgp->cg_old_niblk = sblock.fs_ipg;
        cgp->cg_old_btotoff = cgp->cg_iusedoff;
        cgp->cg_old_boff = cgp->cg_old_btotoff +
            sblock.fs_old_cpg * sizeof(int32_t);
        cgp->cg_iusedoff = cgp->cg_old_boff +
            sblock.fs_old_cpg * sizeof(u_int16_t);
    }
    cgp->cg_freeoff = cgp->cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT);
    cgp->cg_nextfreeoff = cgp->cg_freeoff +
        howmany(sblock.fs_fpg, CHAR_BIT);
    if (sblock.fs_contigsumsize > 0) {
        cgp->cg_nclusterblks = cgp->cg_ndblk / sblock.fs_frag;
        cgp->cg_clustersumoff =
            roundup(cgp->cg_nextfreeoff, sizeof(u_int32_t));
        cgp->cg_clustersumoff -= sizeof(u_int32_t);
        cgp->cg_clusteroff = cgp->cg_clustersumoff +
            (sblock.fs_contigsumsize + 1) * sizeof(u_int32_t);
        cgp->cg_nextfreeoff = cgp->cg_clusteroff +
            howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT);
    }
    cgdirty();
    return (0);
}
void
pass5(void)
{
    int c, blk, frags, basesize, sumsize, mapsize, cssize;
    int inomapsize, blkmapsize;
    struct fs *fs = sblock;
    daddr_t dbase, dmax;
    daddr_t d;
    long i, j, k;
    struct csum *cs;
    struct csum_total cstotal;
    struct inodesc idesc[4];
    char buf[MAXBSIZE];
    struct cg *newcg = (struct cg *)buf;
    struct ocg *ocg = (struct ocg *)buf;
    struct cg *cg = cgrp, *ncg;
    struct inostat *info;
    u_int32_t ncgsize;

    inoinfo(WINO)->ino_state = USTATE;
    memset(newcg, 0, (size_t)fs->fs_cgsize);
    newcg->cg_niblk = fs->fs_ipg;
    if (cvtlevel >= 3) {
        if (fs->fs_maxcontig < 2 && fs->fs_contigsumsize > 0) {
            if (preen)
                pwarn("DELETING CLUSTERING MAPS\n");
            if (preen || reply("DELETE CLUSTERING MAPS")) {
                fs->fs_contigsumsize = 0;
                doinglevel1 = 1;
                sbdirty();
            }
        }
        if (fs->fs_maxcontig > 1) {
            const char *doit = NULL;

            if (fs->fs_contigsumsize < 1) {
                doit = "CREAT";
            } else if (fs->fs_contigsumsize < fs->fs_maxcontig &&
                fs->fs_contigsumsize < FS_MAXCONTIG) {
                doit = "EXPAND";
            }
            if (doit) {
                i = fs->fs_contigsumsize;
                fs->fs_contigsumsize =
                    MIN(fs->fs_maxcontig, FS_MAXCONTIG);
                if (CGSIZE(fs) > fs->fs_bsize) {
                    pwarn("CANNOT %s CLUSTER MAPS\n", doit);
                    fs->fs_contigsumsize = i;
                } else if (preen || reply("CREATE CLUSTER MAPS")) {
                    if (preen)
                        pwarn("%sING CLUSTER MAPS\n", doit);
                    ncgsize = fragroundup(fs, CGSIZE(fs));
                    ncg = realloc(cgrp, ncgsize);
                    if (ncg == NULL)
                        errexit("cannot reallocate cg space");
                    cg = cgrp = ncg;
                    fs->fs_cgsize = ncgsize;
                    doinglevel1 = 1;
                    sbdirty();
                }
            }
        }
    }
    basesize = &newcg->cg_space[0] - (u_char *)(&newcg->cg_firstfield);
    cssize = (u_char *)&cstotal.cs_spare[0] - (u_char *)&cstotal.cs_ndir;
    sumsize = 0;
    if (is_ufs2) {
        newcg->cg_iusedoff = basesize;
    } else {
        /*
         * We reserve the space for the old rotation summary
         * tables for the benefit of old kernels, but do not
         * maintain them in modern kernels. In time, they can
         * go away.
         */
        newcg->cg_old_btotoff = basesize;
        newcg->cg_old_boff = newcg->cg_old_btotoff +
            fs->fs_old_cpg * sizeof(int32_t);
        newcg->cg_iusedoff = newcg->cg_old_boff +
            fs->fs_old_cpg * fs->fs_old_nrpos * sizeof(u_int16_t);
        memset(&newcg->cg_space[0], 0, newcg->cg_iusedoff - basesize);
    }
    inomapsize = howmany(fs->fs_ipg, CHAR_BIT);
    newcg->cg_freeoff = newcg->cg_iusedoff + inomapsize;
    blkmapsize = howmany(fs->fs_fpg, CHAR_BIT);
    newcg->cg_nextfreeoff = newcg->cg_freeoff + blkmapsize;
    if (fs->fs_contigsumsize > 0) {
        newcg->cg_clustersumoff = newcg->cg_nextfreeoff -
            sizeof(u_int32_t);
        if (isappleufs) {
            /*
             * Apple PR2216969 gives rationale for this change.
             * I believe they were mistaken, but we need to
             * duplicate it for compatibility.
             * -- [email protected]
             */
            newcg->cg_clustersumoff += sizeof(u_int32_t);
        }
        newcg->cg_clustersumoff =
            roundup(newcg->cg_clustersumoff, sizeof(u_int32_t));
        newcg->cg_clusteroff = newcg->cg_clustersumoff +
            (fs->fs_contigsumsize + 1) * sizeof(u_int32_t);
        newcg->cg_nextfreeoff = newcg->cg_clusteroff +
            howmany(fragstoblks(fs, fs->fs_fpg), CHAR_BIT);
    }
    newcg->cg_magic = CG_MAGIC;
    mapsize = newcg->cg_nextfreeoff - newcg->cg_iusedoff;
    if (!is_ufs2 && ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0)) {
        switch ((int)fs->fs_old_postblformat) {

        case FS_42POSTBLFMT:
            basesize = (char *)(&ocg->cg_btot[0]) -
                (char *)(&ocg->cg_firstfield);
            sumsize = &ocg->cg_iused[0] -
                (u_int8_t *)(&ocg->cg_btot[0]);
            mapsize = &ocg->cg_free[howmany(fs->fs_fpg, NBBY)] -
                (u_char *)&ocg->cg_iused[0];
            blkmapsize = howmany(fs->fs_fpg, NBBY);
            inomapsize = &ocg->cg_free[0] -
                (u_char *)&ocg->cg_iused[0];
            ocg->cg_magic = CG_MAGIC;
            newcg->cg_magic = 0;
            break;

        case FS_DYNAMICPOSTBLFMT:
            sumsize = newcg->cg_iusedoff - newcg->cg_old_btotoff;
            break;

        default:
            errexit("UNKNOWN ROTATIONAL TABLE FORMAT %d",
                fs->fs_old_postblformat);
        }
    }
    memset(&idesc[0], 0, sizeof idesc);
    for (i = 0; i < 4; i++) {
        idesc[i].id_type = ADDR;
        if (!is_ufs2 && doinglevel2)
            idesc[i].id_fix = FIX;
    }
    memset(&cstotal, 0, sizeof(struct csum_total));
    dmax = blknum(fs, fs->fs_size + fs->fs_frag - 1);
    for (d = fs->fs_size; d < dmax; d++)
        setbmap(d);
    for (c = 0; c < fs->fs_ncg; c++) {
        if (got_siginfo) {
            fprintf(stderr,
                "%s: phase 5: cyl group %d of %d (%d%%)\n",
                cdevname(), c, fs->fs_ncg, c * 100 / fs->fs_ncg);
            got_siginfo = 0;
        }
#ifdef PROGRESS
        progress_bar(cdevname(), preen ? NULL : "phase 5",
            c, fs->fs_ncg);
#endif /* PROGRESS */
        getblk(&cgblk, cgtod(fs, c), fs->fs_cgsize);
        memcpy(cg, cgblk.b_un.b_cg, fs->fs_cgsize);
        if ((doswap && !needswap) || (!doswap && needswap))
            ffs_cg_swap(cgblk.b_un.b_cg, cg, sblock);
        if (!doinglevel1 && !cg_chkmagic(cg, 0))
            pfatal("CG %d: PASS5: BAD MAGIC NUMBER\n", c);
        if (doswap)
            cgdirty();
        /*
         * While we have the disk head where we want it,
         * write back the superblock to the spare at this
         * cylinder group.
         */
        if ((cvtlevel && sblk.b_dirty) || doswap) {
            bwrite(fswritefd, sblk.b_un.b_buf,
                fsbtodb(sblock, cgsblock(sblock, c)),
                sblock->fs_sbsize);
        } else {
            /*
             * Read in the current alternate superblock,
             * and compare it to the master.  If it's
             * wrong, fix it up.
             */
            getblk(&asblk, cgsblock(sblock, c), sblock->fs_sbsize);
            if (asblk.b_errs)
                pfatal("CG %d: UNABLE TO READ ALTERNATE "
                    "SUPERBLK\n", c);
            else {
                memmove(altsblock, asblk.b_un.b_fs,
                    sblock->fs_sbsize);
                if (needswap)
                    ffs_sb_swap(asblk.b_un.b_fs, altsblock);
            }
            sb_oldfscompat_write(sblock, sblocksave);
            if ((asblk.b_errs || cmpsblks(sblock, altsblock)) &&
                dofix(&idesc[3],
                    "ALTERNATE SUPERBLK(S) ARE INCORRECT")) {
                bwrite(fswritefd, sblk.b_un.b_buf,
                    fsbtodb(sblock, cgsblock(sblock, c)),
                    sblock->fs_sbsize);
            }
            sb_oldfscompat_read(sblock, 0);
        }
        dbase = cgbase(fs, c);
        dmax = dbase + fs->fs_fpg;
        if (dmax > fs->fs_size)
            dmax = fs->fs_size;
        if (is_ufs2 || (fs->fs_old_flags & FS_FLAGS_UPDATED))
            newcg->cg_time = cg->cg_time;
        newcg->cg_old_time = cg->cg_old_time;
        newcg->cg_cgx = c;
        newcg->cg_ndblk = dmax - dbase;
        if (!is_ufs2) {
            if (c == fs->fs_ncg - 1) {
                /*
                 * Avoid fighting old fsck for this value.
                 * It's never used outside of this check
                 * anyway.
                 */
                if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0)
                    newcg->cg_old_ncyl =
                        fs->fs_old_ncyl % fs->fs_old_cpg;
                else
                    newcg->cg_old_ncyl =
                        howmany(newcg->cg_ndblk,
                            fs->fs_fpg / fs->fs_old_cpg);
            } else
                newcg->cg_old_ncyl = fs->fs_old_cpg;
            newcg->cg_old_niblk = fs->fs_ipg;
            newcg->cg_niblk = 0;
        }
        if (fs->fs_contigsumsize > 0)
            newcg->cg_nclusterblks = newcg->cg_ndblk / fs->fs_frag;
        newcg->cg_cs.cs_ndir = 0;
        newcg->cg_cs.cs_nffree = 0;
        newcg->cg_cs.cs_nbfree = 0;
        newcg->cg_cs.cs_nifree = fs->fs_ipg;
        if (cg->cg_rotor >= 0 && cg->cg_rotor < newcg->cg_ndblk)
            newcg->cg_rotor = cg->cg_rotor;
        else
            newcg->cg_rotor = 0;
        if (cg->cg_frotor >= 0 && cg->cg_frotor < newcg->cg_ndblk)
            newcg->cg_frotor = cg->cg_frotor;
        else
            newcg->cg_frotor = 0;
        if (cg->cg_irotor >= 0 && cg->cg_irotor < fs->fs_ipg)
            newcg->cg_irotor = cg->cg_irotor;
        else
            newcg->cg_irotor = 0;
        if (!is_ufs2) {
            newcg->cg_initediblk = 0;
        } else {
            if ((unsigned)cg->cg_initediblk > fs->fs_ipg)
                newcg->cg_initediblk = fs->fs_ipg;
            else
                newcg->cg_initediblk = cg->cg_initediblk;
        }
        memset(&newcg->cg_frsum[0], 0, sizeof newcg->cg_frsum);
        memset(&old_cg_blktot(newcg, 0)[0], 0, (size_t)(sumsize));
        memset(cg_inosused(newcg, 0), 0, (size_t)(mapsize));
        if (!is_ufs2 && ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) &&
            fs->fs_old_postblformat == FS_42POSTBLFMT)
            ocg->cg_magic = CG_MAGIC;
        j = fs->fs_ipg * c;
        for (i = 0; i < fs->fs_ipg; j++, i++) {
            info = inoinfo(j);
            switch (info->ino_state) {

            case USTATE:
                break;

            case DSTATE:
            case DCLEAR:
            case DFOUND:
                newcg->cg_cs.cs_ndir++;
                /* fall through */

            case FSTATE:
            case FCLEAR:
                newcg->cg_cs.cs_nifree--;
                setbit(cg_inosused(newcg, 0), i);
                break;

            default:
                if (j < ROOTINO)
                    break;
                errexit("BAD STATE %d FOR INODE I=%ld",
                    info->ino_state, (long)j);
            }
        }
        if (c == 0)
            for (i = 0; i < ROOTINO; i++) {
                setbit(cg_inosused(newcg, 0), i);
                newcg->cg_cs.cs_nifree--;
            }
        for (i = 0, d = dbase;
            d < dmax;
            d += fs->fs_frag, i += fs->fs_frag) {
            frags = 0;
            for (j = 0; j < fs->fs_frag; j++) {
                if (testbmap(d + j))
                    continue;
                setbit(cg_blksfree(newcg, 0), i + j);
                frags++;
            }
            if (frags == fs->fs_frag) {
                newcg->cg_cs.cs_nbfree++;
                if (sumsize) {
                    j = old_cbtocylno(fs, i);
                    old_cg_blktot(newcg, 0)[j]++;
                    old_cg_blks(fs, newcg, j,
                        0)[old_cbtorpos(fs, i)]++;
                }
                if (fs->fs_contigsumsize > 0)
                    setbit(cg_clustersfree(newcg, 0),
                        fragstoblks(fs, i));
            } else if (frags > 0) {
                newcg->cg_cs.cs_nffree += frags;
                blk = blkmap(fs, cg_blksfree(newcg, 0), i);
                ffs_fragacct(fs, blk, newcg->cg_frsum, 1, 0);
            }
        }
        if (fs->fs_contigsumsize > 0) {
            int32_t *sump = cg_clustersum(newcg, 0);
            u_char *mapp = cg_clustersfree(newcg, 0);
            int map = *mapp++;
            int bit = 1;
            int run = 0;

            for (i = 0; i < newcg->cg_nclusterblks; i++) {
                if ((map & bit) != 0) {
                    run++;
                } else if (run != 0) {
                    if (run > fs->fs_contigsumsize)
                        run = fs->fs_contigsumsize;
                    sump[run]++;
                    run = 0;
                }
                if ((i & (NBBY - 1)) != (NBBY - 1)) {
                    bit <<= 1;
                } else {
                    map = *mapp++;
                    bit = 1;
                }
            }
            if (run != 0) {
                if (run > fs->fs_contigsumsize)
                    run = fs->fs_contigsumsize;
                sump[run]++;
            }
        }
        cstotal.cs_nffree += newcg->cg_cs.cs_nffree;
        cstotal.cs_nbfree += newcg->cg_cs.cs_nbfree;
        cstotal.cs_nifree += newcg->cg_cs.cs_nifree;
        cstotal.cs_ndir += newcg->cg_cs.cs_ndir;
        cs = &fs->fs_cs(fs, c);
        if (memcmp(&newcg->cg_cs, cs, sizeof *cs) != 0) {
            if (debug) {
                printf("cg %d: nffree: %d/%d nbfree %d/%d"
                    " nifree %d/%d ndir %d/%d\n",
                    c, cs->cs_nffree, newcg->cg_cs.cs_nffree,
                    cs->cs_nbfree, newcg->cg_cs.cs_nbfree,
                    cs->cs_nifree, newcg->cg_cs.cs_nifree,
                    cs->cs_ndir, newcg->cg_cs.cs_ndir);
            }
            if (dofix(&idesc[0],
                "FREE BLK COUNT(S) WRONG IN SUPERBLK")) {
                memmove(cs, &newcg->cg_cs, sizeof *cs);
                sbdirty();
            } else
                markclean = 0;
        }
        if (doinglevel1) {
            memmove(cg, newcg, (size_t)fs->fs_cgsize);
            cgdirty();
            continue;
        }
        if ((memcmp(newcg, cg, basesize) != 0) ||
            (memcmp(&old_cg_blktot(newcg, 0)[0],
                &old_cg_blktot(cg, 0)[0], sumsize) != 0)) {
            if (dofix(&idesc[2], "SUMMARY INFORMATION BAD")) {
                memmove(cg, newcg, (size_t)basesize);
                memmove(&old_cg_blktot(cg, 0)[0],
                    &old_cg_blktot(newcg, 0)[0],
                    (size_t)sumsize);
                cgdirty();
            } else
                markclean = 0;
        }
        if (usedsoftdep) {
            for (i = 0; i < inomapsize; i++) {
                j = cg_inosused(newcg, 0)[i];
                if ((cg_inosused(cg, 0)[i] & j) == j)
                    continue;
                for (k = 0; k < NBBY; k++) {
                    if ((j & (1 << k)) == 0)
                        continue;
                    if (cg_inosused(cg, 0)[i] & (1 << k))
                        continue;
                    pwarn("ALLOCATED INODE %ld "
                        "MARKED FREE\n",
                        c * fs->fs_ipg + i * 8 + k);
                }
            }
            for (i = 0; i < blkmapsize; i++) {
                j = cg_blksfree(cg, 0)[i];
                if ((cg_blksfree(newcg, 0)[i] & j) == j)
                    continue;
                for (k = 0; k < NBBY; k++) {
                    if ((j & (1 << k)) == 0)
                        continue;
                    /* (was cg_inosused(); the block map is meant here) */
                    if (cg_blksfree(newcg, 0)[i] & (1 << k))
                        continue;
                    pwarn("ALLOCATED FRAG %ld "
                        "MARKED FREE\n",
                        c * fs->fs_fpg + i * 8 + k);
                }
            }
        }
        if (memcmp(cg_inosused(newcg, 0), cg_inosused(cg, 0),
            mapsize) != 0 &&
            dofix(&idesc[1], "BLK(S) MISSING IN BIT MAPS")) {
            memmove(cg_inosused(cg, 0), cg_inosused(newcg, 0),
                (size_t)mapsize);
            cgdirty();
        }
    }
    if (memcmp(&cstotal, &fs->fs_cstotal, cssize) != 0) {
        if (debug) {
            printf("total: nffree: %lld/%lld nbfree %lld/%lld"
                " nifree %lld/%lld ndir %lld/%lld\n",
                (long long int)fs->fs_cstotal.cs_nffree,
                (long long int)cstotal.cs_nffree,
                (long long int)fs->fs_cstotal.cs_nbfree,
                (long long int)cstotal.cs_nbfree,
                (long long int)fs->fs_cstotal.cs_nifree,
                (long long int)cstotal.cs_nifree,
                (long long int)fs->fs_cstotal.cs_ndir,
                (long long int)cstotal.cs_ndir);
        }
        if (dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) {
            memmove(&fs->fs_cstotal, &cstotal, sizeof cstotal);
            fs->fs_ronly = 0;
            fs->fs_fmod = 0;
            sbdirty();
        } else
            markclean = 0;
    }
#ifdef PROGRESS
    if (!preen)
        progress_done();
#endif /* PROGRESS */
}
void
pass5(void)
{
    int c, i, j, blk, frags, basesize, mapsize;
    int inomapsize, blkmapsize;
    struct fs *fs = &sblock;
    ufs2_daddr_t d, dbase, dmax, start;
    int rewritecg = 0;
    struct csum *cs;
    struct csum_total cstotal;
    struct inodesc idesc[3];
    char buf[MAXBSIZE];
    struct cg *cg, *newcg = (struct cg *)buf;
    struct bufarea *cgbp;

    inoinfo(WINO)->ino_state = USTATE;
    memset(newcg, 0, (size_t)fs->fs_cgsize);
    newcg->cg_niblk = fs->fs_ipg;
    if (cvtlevel >= 3) {
        if (fs->fs_maxcontig < 2 && fs->fs_contigsumsize > 0) {
            if (preen)
                pwarn("DELETING CLUSTERING MAPS\n");
            if (preen || reply("DELETE CLUSTERING MAPS")) {
                fs->fs_contigsumsize = 0;
                rewritecg = 1;
                sbdirty();
            }
        }
        if (fs->fs_maxcontig > 1) {
            const char *doit = 0;

            if (fs->fs_contigsumsize < 1) {
                doit = "CREAT";
            } else if (fs->fs_contigsumsize < fs->fs_maxcontig &&
                fs->fs_contigsumsize < FS_MAXCONTIG) {
                doit = "EXPAND";
            }
            if (doit) {
                i = fs->fs_contigsumsize;
                fs->fs_contigsumsize =
                    MIN(fs->fs_maxcontig, FS_MAXCONTIG);
                if (CGSIZE(fs) > (u_int)fs->fs_bsize) {
                    pwarn("CANNOT %s CLUSTER MAPS\n", doit);
                    fs->fs_contigsumsize = i;
                } else if (preen || reply("CREATE CLUSTER MAPS")) {
                    if (preen)
                        pwarn("%sING CLUSTER MAPS\n", doit);
                    fs->fs_cgsize = fragroundup(fs, CGSIZE(fs));
                    rewritecg = 1;
                    sbdirty();
                }
            }
        }
    }
    basesize = &newcg->cg_space[0] - (u_char *)(&newcg->cg_firstfield);
    if (sblock.fs_magic == FS_UFS2_MAGIC) {
        newcg->cg_iusedoff = basesize;
    } else {
        /*
         * We reserve the space for the old rotation summary
         * tables for the benefit of old kernels, but do not
         * maintain them in modern kernels. In time, they can
         * go away.
         */
        newcg->cg_old_btotoff = basesize;
        newcg->cg_old_boff = newcg->cg_old_btotoff +
            fs->fs_old_cpg * sizeof(int32_t);
        newcg->cg_iusedoff = newcg->cg_old_boff +
            fs->fs_old_cpg * fs->fs_old_nrpos * sizeof(u_int16_t);
        memset(&newcg->cg_space[0], 0, newcg->cg_iusedoff - basesize);
    }
    inomapsize = howmany(fs->fs_ipg, CHAR_BIT);
    newcg->cg_freeoff = newcg->cg_iusedoff + inomapsize;
    blkmapsize = howmany(fs->fs_fpg, CHAR_BIT);
    newcg->cg_nextfreeoff = newcg->cg_freeoff + blkmapsize;
    if (fs->fs_contigsumsize > 0) {
        newcg->cg_clustersumoff = newcg->cg_nextfreeoff -
            sizeof(u_int32_t);
        newcg->cg_clustersumoff =
            roundup(newcg->cg_clustersumoff, sizeof(u_int32_t));
        newcg->cg_clusteroff = newcg->cg_clustersumoff +
            (fs->fs_contigsumsize + 1) * sizeof(u_int32_t);
        newcg->cg_nextfreeoff = newcg->cg_clusteroff +
            howmany(fragstoblks(fs, fs->fs_fpg), CHAR_BIT);
    }
    newcg->cg_magic = CG_MAGIC;
    mapsize = newcg->cg_nextfreeoff - newcg->cg_iusedoff;
    memset(&idesc[0], 0, sizeof idesc);
    for (i = 0; i < 3; i++)
        idesc[i].id_type = ADDR;
    memset(&cstotal, 0, sizeof(struct csum_total));
    dmax = blknum(fs, fs->fs_size + fs->fs_frag - 1);
    for (d = fs->fs_size; d < dmax; d++)
        setbmap(d);
    for (c = 0; c < fs->fs_ncg; c++) {
        if (got_siginfo) {
            printf("%s: phase 5: cyl group %d of %d (%d%%)\n",
                cdevname, c, sblock.fs_ncg,
                c * 100 / sblock.fs_ncg);
            got_siginfo = 0;
        }
        if (got_sigalarm) {
            setproctitle("%s p5 %d%%", cdevname,
                c * 100 / sblock.fs_ncg);
            got_sigalarm = 0;
        }
        cgbp = cgget(c);
        cg = cgbp->b_un.b_cg;
        if (!cg_chkmagic(cg))
            pfatal("CG %d: BAD MAGIC NUMBER\n", c);
        newcg->cg_time = cg->cg_time;
        newcg->cg_old_time = cg->cg_old_time;
        newcg->cg_unrefs = cg->cg_unrefs;
        newcg->cg_cgx = c;
        dbase = cgbase(fs, c);
        dmax = dbase + fs->fs_fpg;
        if (dmax > fs->fs_size)
            dmax = fs->fs_size;
        newcg->cg_ndblk = dmax - dbase;
        if (fs->fs_magic == FS_UFS1_MAGIC) {
            if (c == fs->fs_ncg - 1)
                newcg->cg_old_ncyl = howmany(newcg->cg_ndblk,
                    fs->fs_fpg / fs->fs_old_cpg);
            else
                newcg->cg_old_ncyl = fs->fs_old_cpg;
            newcg->cg_old_niblk = fs->fs_ipg;
            newcg->cg_niblk = 0;
        }
        if (fs->fs_contigsumsize > 0)
            newcg->cg_nclusterblks = newcg->cg_ndblk / fs->fs_frag;
        newcg->cg_cs.cs_ndir = 0;
        newcg->cg_cs.cs_nffree = 0;
        newcg->cg_cs.cs_nbfree = 0;
        newcg->cg_cs.cs_nifree = fs->fs_ipg;
        if (cg->cg_rotor >= 0 && cg->cg_rotor < newcg->cg_ndblk)
            newcg->cg_rotor = cg->cg_rotor;
        else
            newcg->cg_rotor = 0;
        if (cg->cg_frotor >= 0 && cg->cg_frotor < newcg->cg_ndblk)
            newcg->cg_frotor = cg->cg_frotor;
        else
            newcg->cg_frotor = 0;
        if (cg->cg_irotor >= 0 && cg->cg_irotor < fs->fs_ipg)
            newcg->cg_irotor = cg->cg_irotor;
        else
            newcg->cg_irotor = 0;
        if (fs->fs_magic == FS_UFS1_MAGIC) {
            newcg->cg_initediblk = 0;
        } else {
            if ((unsigned)cg->cg_initediblk > fs->fs_ipg)
                newcg->cg_initediblk = fs->fs_ipg;
            else
                newcg->cg_initediblk = cg->cg_initediblk;
        }
        memset(&newcg->cg_frsum[0], 0, sizeof newcg->cg_frsum);
        memset(cg_inosused(newcg), 0, (size_t)(mapsize));
        j = fs->fs_ipg * c;
        for (i = 0; i < inostathead[c].il_numalloced; j++, i++) {
            switch (inoinfo(j)->ino_state) {

            case USTATE:
                break;

            case DSTATE:
            case DCLEAR:
            case DFOUND:
            case DZLINK:
                newcg->cg_cs.cs_ndir++;
                /* FALLTHROUGH */

            case FSTATE:
            case FCLEAR:
            case FZLINK:
                newcg->cg_cs.cs_nifree--;
                setbit(cg_inosused(newcg), i);
                break;

            default:
                if (j < (int)ROOTINO)
                    break;
                errx(EEXIT, "BAD STATE %d FOR INODE I=%d",
                    inoinfo(j)->ino_state, j);
            }
        }
        if (c == 0)
            for (i = 0; i < (int)ROOTINO; i++) {
                setbit(cg_inosused(newcg), i);
                newcg->cg_cs.cs_nifree--;
            }
        start = -1;
        for (i = 0, d = dbase;
            d < dmax;
            d += fs->fs_frag, i += fs->fs_frag) {
            frags = 0;
            for (j = 0; j < fs->fs_frag; j++) {
                if (testbmap(d + j)) {
                    if (Eflag && start != -1) {
                        clear_blocks(start, d + j - 1);
                        start = -1;
                    }
                    continue;
                }
                if (start == -1)
                    start = d + j;
                setbit(cg_blksfree(newcg), i + j);
                frags++;
            }
            if (frags == fs->fs_frag) {
                newcg->cg_cs.cs_nbfree++;
                if (fs->fs_contigsumsize > 0)
                    setbit(cg_clustersfree(newcg),
                        i / fs->fs_frag);
            } else if (frags > 0) {
                newcg->cg_cs.cs_nffree += frags;
                blk = blkmap(fs, cg_blksfree(newcg), i);
                ffs_fragacct(fs, blk, newcg->cg_frsum, 1);
            }
        }
        if (Eflag && start != -1)
            clear_blocks(start, d - 1);
        if (fs->fs_contigsumsize > 0) {
            int32_t *sump = cg_clustersum(newcg);
            u_char *mapp = cg_clustersfree(newcg);
            int map = *mapp++;
            int bit = 1;
            int run = 0;

            for (i = 0; i < newcg->cg_nclusterblks; i++) {
                if ((map & bit) != 0) {
                    run++;
                } else if (run != 0) {
                    if (run > fs->fs_contigsumsize)
                        run = fs->fs_contigsumsize;
                    sump[run]++;
                    run = 0;
                }
                if ((i & (CHAR_BIT - 1)) != (CHAR_BIT - 1)) {
                    bit <<= 1;
                } else {
                    map = *mapp++;
                    bit = 1;
                }
            }
            if (run != 0) {
                if (run > fs->fs_contigsumsize)
                    run = fs->fs_contigsumsize;
                sump[run]++;
            }
        }
        if (bkgrdflag != 0) {
            cstotal.cs_nffree += cg->cg_cs.cs_nffree;
            cstotal.cs_nbfree += cg->cg_cs.cs_nbfree;
            cstotal.cs_nifree += cg->cg_cs.cs_nifree;
            cstotal.cs_ndir += cg->cg_cs.cs_ndir;
        } else {
            cstotal.cs_nffree += newcg->cg_cs.cs_nffree;
            cstotal.cs_nbfree += newcg->cg_cs.cs_nbfree;
            cstotal.cs_nifree += newcg->cg_cs.cs_nifree;
            cstotal.cs_ndir += newcg->cg_cs.cs_ndir;
        }
        cs = &fs->fs_cs(fs, c);
        if (cursnapshot == 0 &&
            memcmp(&newcg->cg_cs, cs, sizeof *cs) != 0 &&
            dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) {
            memmove(cs, &newcg->cg_cs, sizeof *cs);
            sbdirty();
        }
        if (rewritecg) {
            memmove(cg, newcg, (size_t)fs->fs_cgsize);
            dirty(cgbp);
            continue;
        }
        if (cursnapshot == 0 &&
            memcmp(newcg, cg, basesize) != 0 &&
            dofix(&idesc[2], "SUMMARY INFORMATION BAD")) {
            memmove(cg, newcg, (size_t)basesize);
            dirty(cgbp);
        }
        if (bkgrdflag != 0 || usedsoftdep || debug)
            update_maps(cg, newcg, bkgrdflag);
        if (cursnapshot == 0 &&
            memcmp(cg_inosused(newcg), cg_inosused(cg),
                mapsize) != 0 &&
            dofix(&idesc[1], "BLK(S) MISSING IN BIT MAPS")) {
            memmove(cg_inosused(cg), cg_inosused(newcg),
                (size_t)mapsize);
            dirty(cgbp);
        }
    }
    if (cursnapshot == 0 &&
        memcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal) != 0 &&
        dofix(&idesc[0], "SUMMARY BLK COUNT(S) WRONG IN SUPERBLK")) {
        memmove(&fs->fs_cstotal, &cstotal, sizeof cstotal);
        fs->fs_ronly = 0;
        fs->fs_fmod = 0;
        sbdirty();
    }
    /*
     * When doing background fsck on a snapshot, figure out whether
     * the superblock summary is inaccurate and correct it when
     * necessary.
     */
    if (cursnapshot != 0) {
        cmd.size = 1;
        cmd.value = cstotal.cs_ndir - fs->fs_cstotal.cs_ndir;
        if (cmd.value != 0) {
            if (debug)
                printf("adjndir by %+" PRIi64 "\n", cmd.value);
            if (bkgrdsumadj == 0 || sysctl(adjndir, MIBSIZE, 0, 0,
                &cmd, sizeof cmd) == -1)
                rwerror("ADJUST NUMBER OF DIRECTORIES", cmd.value);
        }
        cmd.value = cstotal.cs_nbfree - fs->fs_cstotal.cs_nbfree;
        if (cmd.value != 0) {
            if (debug)
                printf("adjnbfree by %+" PRIi64 "\n", cmd.value);
            if (bkgrdsumadj == 0 || sysctl(adjnbfree, MIBSIZE, 0, 0,
                &cmd, sizeof cmd) == -1)
                rwerror("ADJUST NUMBER OF FREE BLOCKS", cmd.value);
        }
        cmd.value = cstotal.cs_nifree - fs->fs_cstotal.cs_nifree;
        if (cmd.value != 0) {
            if (debug)
                printf("adjnifree by %+" PRIi64 "\n", cmd.value);
            if (bkgrdsumadj == 0 || sysctl(adjnifree, MIBSIZE, 0, 0,
                &cmd, sizeof cmd) == -1)
                rwerror("ADJUST NUMBER OF FREE INODES", cmd.value);
        }
        cmd.value = cstotal.cs_nffree - fs->fs_cstotal.cs_nffree;
        if (cmd.value != 0) {
            if (debug)
                printf("adjnffree by %+" PRIi64 "\n", cmd.value);
            if (bkgrdsumadj == 0 || sysctl(adjnffree, MIBSIZE, 0, 0,
                &cmd, sizeof cmd) == -1)
                rwerror("ADJUST NUMBER OF FREE FRAGS", cmd.value);
        }
        cmd.value = cstotal.cs_numclusters -
            fs->fs_cstotal.cs_numclusters;
        if (cmd.value != 0) {
            if (debug)
                printf("adjnumclusters by %+" PRIi64 "\n",
                    cmd.value);
            if (bkgrdsumadj == 0 || sysctl(adjnumclusters, MIBSIZE,
                0, 0, &cmd, sizeof cmd) == -1)
                rwerror("ADJUST NUMBER OF FREE CLUSTERS",
                    cmd.value);
        }
    }
}