void
pass1(void)
{
	struct inostat *info;
	struct inodesc idesc;
	struct bufarea *cgbp;
	struct cg *cgp;
	ino_t inumber, inosused, mininos;
	ufs2_daddr_t i, cgd;
	u_int8_t *cp;
	int c, rebuildcg;

	badblk = dupblk = lastino = 0;

	/*
	 * Set file system reserved blocks in used block map.
	 */
	for (c = 0; c < sblock.fs_ncg; c++) {
		cgd = cgdmin(&sblock, c);
		if (c == 0) {
			i = cgbase(&sblock, c);
		} else
			i = cgsblock(&sblock, c);
		for (; i < cgd; i++)
			setbmap(i);
	}
	i = sblock.fs_csaddr;
	cgd = i + howmany(sblock.fs_cssize, sblock.fs_fsize);
	for (; i < cgd; i++)
		setbmap(i);

	/*
	 * Find all allocated blocks.
	 */
	memset(&idesc, 0, sizeof(struct inodesc));
	idesc.id_func = pass1check;
	n_files = n_blks = 0;
	for (c = 0; c < sblock.fs_ncg; c++) {
		inumber = c * sblock.fs_ipg;
		setinodebuf(inumber);
		cgbp = cgget(c);
		cgp = cgbp->b_un.b_cg;
		rebuildcg = 0;
		if (!check_cgmagic(c, cgbp))
			rebuildcg = 1;
		if (!rebuildcg && sblock.fs_magic == FS_UFS2_MAGIC) {
			inosused = cgp->cg_initediblk;
			if (inosused > sblock.fs_ipg) {
				pfatal(
"Too many initialized inodes (%ju > %d) in cylinder group %d\nReset to %d\n",
				    (uintmax_t)inosused, sblock.fs_ipg,
				    c, sblock.fs_ipg);
				inosused = sblock.fs_ipg;
			}
		} else {
			inosused = sblock.fs_ipg;
		}
		if (got_siginfo) {
			printf("%s: phase 1: cyl group %d of %d (%d%%)\n",
			    cdevname, c, sblock.fs_ncg,
			    c * 100 / sblock.fs_ncg);
			got_siginfo = 0;
		}
		if (got_sigalarm) {
			setproctitle("%s p1 %d%%", cdevname,
			    c * 100 / sblock.fs_ncg);
			got_sigalarm = 0;
		}
		/*
		 * If we are using soft updates, then we can trust the
		 * cylinder group inode allocation maps to tell us which
		 * inodes are allocated. We will scan the used inode map
		 * to find the inodes that are really in use, and then
		 * read only those inodes in from disk.
		 */
		if ((preen || inoopt) && usedsoftdep && !rebuildcg) {
			cp = &cg_inosused(cgp)[(inosused - 1) / CHAR_BIT];
			for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) {
				if (*cp == 0)
					continue;
				for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) {
					if (*cp & i)
						break;
					inosused--;
				}
				break;
			}
			if (inosused < 0)
				inosused = 0;
		}
		/*
		 * Allocate inoinfo structures for the allocated inodes.
		 */
		inostathead[c].il_numalloced = inosused;
		if (inosused == 0) {
			inostathead[c].il_stat = NULL;
			continue;
		}
		info = Calloc((unsigned)inosused, sizeof(struct inostat));
		if (info == NULL)
			errx(EEXIT, "cannot alloc %u bytes for inoinfo",
			    (unsigned)(sizeof(struct inostat) * inosused));
		inostathead[c].il_stat = info;
		/*
		 * Scan the allocated inodes.
		 */
		for (i = 0; i < inosused; i++, inumber++) {
			if (inumber < ROOTINO) {
				(void)getnextinode(inumber, rebuildcg);
				continue;
			}
			/*
			 * NULL return indicates probable end of allocated
			 * inodes during cylinder group rebuild attempt.
			 * We always keep trying until we get to the minimum
			 * valid number for this cylinder group.
			 */
			if (checkinode(inumber, &idesc, rebuildcg) == 0 &&
			    i > cgp->cg_initediblk)
				break;
		}
		/*
		 * This optimization speeds up future runs of fsck
		 * by trimming down the number of inodes in cylinder
		 * groups that formerly had many inodes but now have
		 * fewer in use.
		 */
		mininos = roundup(inosused + INOPB(&sblock), INOPB(&sblock));
		if (inoopt && !preen && !rebuildcg &&
		    sblock.fs_magic == FS_UFS2_MAGIC &&
		    cgp->cg_initediblk > 2 * INOPB(&sblock) &&
		    mininos < cgp->cg_initediblk) {
			i = cgp->cg_initediblk;
			if (mininos < 2 * INOPB(&sblock))
				cgp->cg_initediblk = 2 * INOPB(&sblock);
			else
				cgp->cg_initediblk = mininos;
			pwarn("CYLINDER GROUP %d: RESET FROM %ju TO %d %s\n",
			    c, i, cgp->cg_initediblk, "VALID INODES");
			dirty(cgbp);
		}
		if (inosused < sblock.fs_ipg)
			continue;
		lastino += 1;
		if (lastino < (c * sblock.fs_ipg))
			inosused = 0;
		else
			inosused = lastino - (c * sblock.fs_ipg);
		if (rebuildcg && inosused > cgp->cg_initediblk &&
		    sblock.fs_magic == FS_UFS2_MAGIC) {
			cgp->cg_initediblk = roundup(inosused, INOPB(&sblock));
			pwarn("CYLINDER GROUP %d: FOUND %d VALID INODES\n",
			    c, cgp->cg_initediblk);
		}
		/*
		 * If we were not able to determine in advance which inodes
		 * were in use, then reduce the size of the inoinfo structure
		 * to the size necessary to describe the inodes that we
		 * really found.
		 */
		if (inumber == lastino)
			continue;
		inostathead[c].il_numalloced = inosused;
		if (inosused == 0) {
			free(inostathead[c].il_stat);
			inostathead[c].il_stat = NULL;
			continue;
		}
		info = Calloc((unsigned)inosused, sizeof(struct inostat));
		if (info == NULL)
			errx(EEXIT, "cannot alloc %u bytes for inoinfo",
			    (unsigned)(sizeof(struct inostat) * inosused));
		memmove(info, inostathead[c].il_stat,
		    inosused * sizeof(*info));
		free(inostathead[c].il_stat);
		inostathead[c].il_stat = info;
	}
}

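/*
 * Illustrative sketch (not part of fsck_ffs): the soft-updates shortcut in
 * pass1() above, and repeated in the scan routines below, walks cg_inosused()
 * backwards one byte at a time and then bit by bit within the last non-zero
 * byte, so that "inosused" ends up just past the highest allocated inode.
 * The standalone program below restates that loop over a plain byte array so
 * it can be tested in isolation; highest_used() and the example map[] are
 * hypothetical names introduced only for this sketch.
 */
#include <limits.h>
#include <stdio.h>

/*
 * Given a used-inode bitmap of "ninodes" bits (bit i set => inode i
 * allocated, least-significant bit first within each byte, as with
 * setbit()), return the count of inodes up to and including the highest
 * allocated one, exactly as the trimming loop computes "inosused".
 */
static int
highest_used(const unsigned char *map, int ninodes)
{
	const unsigned char *cp;
	int inosused, i;

	inosused = ninodes;
	cp = &map[(inosused - 1) / CHAR_BIT];
	for (; inosused > 0; inosused -= CHAR_BIT, cp--) {
		if (*cp == 0)
			continue;		/* whole byte unallocated */
		for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) {
			if (*cp & i)
				break;		/* top-most set bit found */
			inosused--;
		}
		break;
	}
	if (inosused < 0)
		inosused = 0;
	return (inosused);
}

int
main(void)
{
	/* Inodes 0-9 allocated out of 32: bytes 0xff, 0x03, 0x00, 0x00. */
	unsigned char map[] = { 0xff, 0x03, 0x00, 0x00 };

	printf("%d\n", highest_used(map, 32));	/* prints 10 */
	return (0);
}
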
void
pass1(void)
{
	ino_t inumber, inosused, ninosused;
	size_t inospace;
	struct inostat *info;
	int c;
	struct inodesc idesc;
	daddr_t i, cgd;
	u_int8_t *cp;

	/*
	 * Set file system reserved blocks in used block map.
	 */
	for (c = 0; c < sblock.fs_ncg; c++) {
		cgd = cgdmin(&sblock, c);
		if (c == 0)
			i = cgbase(&sblock, c);
		else
			i = cgsblock(&sblock, c);
		for (; i < cgd; i++)
			setbmap(i);
	}
	i = sblock.fs_csaddr;
	cgd = i + howmany(sblock.fs_cssize, sblock.fs_fsize);
	for (; i < cgd; i++)
		setbmap(i);

	/*
	 * Find all allocated blocks.
	 */
	memset(&idesc, 0, sizeof(struct inodesc));
	idesc.id_type = ADDR;
	idesc.id_func = pass1check;
	n_files = n_blks = 0;
	info_inumber = 0;
	info_fn = pass1_info;
	for (c = 0; c < sblock.fs_ncg; c++) {
		inumber = c * sblock.fs_ipg;
		setinodebuf(inumber);
		getblk(&cgblk, cgtod(&sblock, c), sblock.fs_cgsize);
		if (sblock.fs_magic == FS_UFS2_MAGIC) {
			inosused = cgrp.cg_initediblk;
			if (inosused > sblock.fs_ipg)
				inosused = sblock.fs_ipg;
		} else
			inosused = sblock.fs_ipg;
		/*
		 * If we are using soft updates, then we can trust the
		 * cylinder group inode allocation maps to tell us which
		 * inodes are allocated. We will scan the used inode map
		 * to find the inodes that are really in use, and then
		 * read only those inodes in from disk.
		 */
		if (preen && usedsoftdep) {
			cp = &cg_inosused(&cgrp)[(inosused - 1) / CHAR_BIT];
			for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) {
				if (*cp == 0)
					continue;
				for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) {
					if (*cp & i)
						break;
					inosused--;
				}
				break;
			}
			if (inosused < 0)
				inosused = 0;
		}
		/*
		 * Allocate inoinfo structures for the allocated inodes.
		 */
		inostathead[c].il_numalloced = inosused;
		if (inosused == 0) {
			inostathead[c].il_stat = 0;
			continue;
		}
		info = calloc((unsigned)inosused, sizeof(struct inostat));
		inospace = (unsigned)inosused * sizeof(struct inostat);
		if (info == NULL)
			errexit("cannot alloc %zu bytes for inoinfo",
			    inospace);
		inostathead[c].il_stat = info;
		/*
		 * Scan the allocated inodes.
		 */
		for (i = 0; i < inosused; i++, inumber++) {
			info_inumber = inumber;
			if (inumber < ROOTINO) {
				(void)getnextinode(inumber);
				continue;
			}
			checkinode(inumber, &idesc);
		}
		lastino += 1;
		if (inosused < sblock.fs_ipg || inumber == lastino)
			continue;
		/*
		 * If we were not able to determine in advance which inodes
		 * were in use, then reduce the size of the inoinfo structure
		 * to the size necessary to describe the inodes that we
		 * really found.
		 */
		if (lastino < (c * sblock.fs_ipg))
			ninosused = 0;
		else
			ninosused = lastino - (c * sblock.fs_ipg);
		inostathead[c].il_numalloced = ninosused;
		if (ninosused == 0) {
			free(inostathead[c].il_stat);
			inostathead[c].il_stat = 0;
			continue;
		}
		if (ninosused != inosused) {
			struct inostat *ninfo;
			size_t ninospace;

			ninfo = reallocarray(info, ninosused, sizeof(*ninfo));
			if (ninfo == NULL) {
				pfatal("too many inodes %llu, or out of memory\n",
				    (unsigned long long)ninosused);
				exit(8);
			}
			ninospace = ninosused * sizeof(*ninfo);
			if (ninosused > inosused)
				memset(&ninfo[inosused], 0,
				    ninospace - inospace);
			inostathead[c].il_stat = ninfo;
		}
	}
}

/*
 * Scan the specified file system to check quota(s) present on it.
 */
int
chkquota(char *specname, struct quotafile *qfu, struct quotafile *qfg)
{
	struct fileusage *fup;
	union dinode *dp;
	int cg, i, mode, errs = 0;
	ino_t ino, inosused, userino = 0, groupino = 0;
	dev_t dev, userdev = 0, groupdev = 0;
	struct stat sb;
	const char *mntpt;
	char *cp;

	if (qfu != NULL)
		mntpt = quota_fsname(qfu);
	else if (qfg != NULL)
		mntpt = quota_fsname(qfg);
	else
		errx(1, "null quotafile information passed to chkquota()\n");
	if (cflag) {
		if (vflag && qfu != NULL)
			printf("%s: convert user quota to %d bits\n",
			    mntpt, cflag);
		if (qfu != NULL && quota_convert(qfu, cflag) < 0) {
			if (errno == EBADF)
				errx(1,
				    "%s: cannot convert an active quota file",
				    mntpt);
			err(1, "user quota conversion to size %d failed",
			    cflag);
		}
		if (vflag && qfg != NULL)
			printf("%s: convert group quota to %d bits\n",
			    mntpt, cflag);
		if (qfg != NULL && quota_convert(qfg, cflag) < 0) {
			if (errno == EBADF)
				errx(1,
				    "%s: cannot convert an active quota file",
				    mntpt);
			err(1, "group quota conversion to size %d failed",
			    cflag);
		}
	}
	if ((fi = open(specname, O_RDONLY, 0)) < 0) {
		warn("%s", specname);
		return (1);
	}
	if ((stat(mntpt, &sb)) < 0) {
		warn("%s", mntpt);
		return (1);
	}
	dev = sb.st_dev;
	if (vflag) {
		(void)printf("*** Checking ");
		if (qfu)
			(void)printf("user%s", qfg ? " and " : "");
		if (qfg)
			(void)printf("group");
		(void)printf(" quotas for %s (%s)\n", specname, mntpt);
	}
	if (qfu) {
		if (stat(quota_qfname(qfu), &sb) == 0) {
			userino = sb.st_ino;
			userdev = sb.st_dev;
		}
	}
	if (qfg) {
		if (stat(quota_qfname(qfg), &sb) == 0) {
			groupino = sb.st_ino;
			groupdev = sb.st_dev;
		}
	}
	sync();
	dev_bsize = 1;
	for (i = 0; sblock_try[i] != -1; i++) {
		bread(sblock_try[i], (char *)&sblock, (long)SBLOCKSIZE);
		if ((sblock.fs_magic == FS_UFS1_MAGIC ||
		     (sblock.fs_magic == FS_UFS2_MAGIC &&
		      sblock.fs_sblockloc == sblock_try[i])) &&
		    sblock.fs_bsize <= MAXBSIZE &&
		    sblock.fs_bsize >= sizeof(struct fs))
			break;
	}
	if (sblock_try[i] == -1) {
		warn("Cannot find file system superblock");
		return (1);
	}
	dev_bsize = sblock.fs_fsize / fsbtodb(&sblock, 1);
	maxino = sblock.fs_ncg * sblock.fs_ipg;
	for (cg = 0; cg < sblock.fs_ncg; cg++) {
		ino = cg * sblock.fs_ipg;
		setinodebuf(ino);
		bread(fsbtodb(&sblock, cgtod(&sblock, cg)),
		    (char *)(&cgblk), sblock.fs_cgsize);
		if (sblock.fs_magic == FS_UFS2_MAGIC)
			inosused = cgblk.cg_initediblk;
		else
			inosused = sblock.fs_ipg;
		/*
		 * If we are using soft updates, then we can trust the
		 * cylinder group inode allocation maps to tell us which
		 * inodes are allocated. We will scan the used inode map
		 * to find the inodes that are really in use, and then
		 * read only those inodes in from disk.
		 */
		if (sblock.fs_flags & FS_DOSOFTDEP) {
			if (!cg_chkmagic(&cgblk))
				errx(1, "CG %d: BAD MAGIC NUMBER\n", cg);
			cp = &cg_inosused(&cgblk)[(inosused - 1) / CHAR_BIT];
			for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) {
				if (*cp == 0)
					continue;
				for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) {
					if (*cp & i)
						break;
					inosused--;
				}
				break;
			}
			if (inosused <= 0)
				continue;
		}
		for (i = 0; i < inosused; i++, ino++) {
			if ((dp = getnextinode(ino)) == NULL ||
			    ino < ROOTINO ||
			    (mode = DIP(dp, di_mode) & IFMT) == 0)
				continue;
			/*
			 * XXX: Do not account for UIDs or GIDs that appear
			 * to be negative to prevent generating 100GB+
			 * quota files.
			 */
			if ((int)DIP(dp, di_uid) < 0 ||
			    (int)DIP(dp, di_gid) < 0) {
				if (vflag) {
					if (aflag)
						(void)printf("%s: ", mntpt);
					(void)printf(
				    "out of range UID/GID (%u/%u) ino=%ju\n",
					    DIP(dp, di_uid), DIP(dp, di_gid),
					    (uintmax_t)ino);
				}
				continue;
			}
			/*
			 * Do not account for file system snapshot files
			 * or the actual quota data files to be consistent
			 * with how they are handled inside the kernel.
			 */
#ifdef	SF_SNAPSHOT
			if (DIP(dp, di_flags) & SF_SNAPSHOT)
				continue;
#endif
			if ((ino == userino && dev == userdev) ||
			    (ino == groupino && dev == groupdev))
				continue;
			if (qfg) {
				fup = addid((u_long)DIP(dp, di_gid),
				    GRPQUOTA, (char *)0, mntpt);
				fup->fu_curinodes++;
				if (mode == IFREG || mode == IFDIR ||
				    mode == IFLNK)
					fup->fu_curblocks +=
					    DIP(dp, di_blocks);
			}
			if (qfu) {
				fup = addid((u_long)DIP(dp, di_uid),
				    USRQUOTA, (char *)0, mntpt);
				fup->fu_curinodes++;
				if (mode == IFREG || mode == IFDIR ||
				    mode == IFLNK)
					fup->fu_curblocks +=
					    DIP(dp, di_blocks);
			}
		}
	}
	freeinodebuf();
	if (qfu)
		errs += update(mntpt, qfu, USRQUOTA);
	if (qfg)
		errs += update(mntpt, qfg, GRPQUOTA);
	close(fi);
	(void)fflush(stdout);
	return (errs);
}

/*
 * Scan the specified file system to check quota(s) present on it.
 */
int
chkquota(const char *vfstype, const char *fsname, const char *mntpt,
    void *auxarg, pid_t *pidp)
{
	struct quotaname *qnp = auxarg;
	struct fileusage *fup;
	union dinode *dp;
	int cg, i, mode, errs = 0, status;
	ino_t ino, inosused;
	pid_t pid;
	char *cp;

	switch (pid = fork()) {
	case -1:	/* error */
		warn("fork");
		return 1;
	case 0:		/* child */
		if ((fi = open(fsname, O_RDONLY, 0)) < 0)
			err(1, "%s", fsname);
		sync();
		dev_bsize = 1;
		for (i = 0; sblock_try[i] != -1; i++) {
			bread(sblock_try[i], (char *)&sblock,
			    (long)SBLOCKSIZE);
			if ((sblock.fs_magic == FS_UFS1_MAGIC ||
			     (sblock.fs_magic == FS_UFS2_MAGIC &&
			      sblock.fs_sblockloc == sblock_try[i])) &&
			    sblock.fs_bsize <= MAXBSIZE &&
			    sblock.fs_bsize >= sizeof(struct fs))
				break;
		}
		if (sblock_try[i] == -1) {
			warn("Cannot find file system superblock");
			return (1);
		}
		dev_bsize = sblock.fs_fsize / fsbtodb(&sblock, 1);
		maxino = sblock.fs_ncg * sblock.fs_ipg;
		for (cg = 0; cg < sblock.fs_ncg; cg++) {
			ino = cg * sblock.fs_ipg;
			setinodebuf(ino);
			bread(fsbtodb(&sblock, cgtod(&sblock, cg)),
			    (char *)(&cgblk), sblock.fs_cgsize);
			if (sblock.fs_magic == FS_UFS2_MAGIC)
				inosused = cgblk.cg_initediblk;
			else
				inosused = sblock.fs_ipg;
			/*
			 * If we are using soft updates, then we can trust the
			 * cylinder group inode allocation maps to tell us which
			 * inodes are allocated. We will scan the used inode map
			 * to find the inodes that are really in use, and then
			 * read only those inodes in from disk.
			 */
			if (sblock.fs_flags & FS_DOSOFTDEP) {
				if (!cg_chkmagic(&cgblk))
					errx(1, "CG %d: BAD MAGIC NUMBER\n",
					    cg);
				cp = &cg_inosused(&cgblk)[(inosused - 1) /
				    CHAR_BIT];
				for ( ; inosused > 0;
				    inosused -= CHAR_BIT, cp--) {
					if (*cp == 0)
						continue;
					for (i = 1 << (CHAR_BIT - 1); i > 0;
					    i >>= 1) {
						if (*cp & i)
							break;
						inosused--;
					}
					break;
				}
				if (inosused <= 0)
					continue;
			}
			for (i = 0; i < inosused; i++, ino++) {
				if ((dp = getnextinode(ino)) == NULL ||
				    ino < ROOTINO ||
				    (mode = DIP(dp, di_mode) & IFMT) == 0)
					continue;
				if (qnp->flags & HASGRP) {
					fup = addid(DIP(dp, di_gid),
					    GRPQUOTA, NULL);
					fup->fu_curinodes++;
					if (mode == IFREG || mode == IFDIR ||
					    mode == IFLNK)
						fup->fu_curblocks +=
						    DIP(dp, di_blocks);
				}
				if (qnp->flags & HASUSR) {
					fup = addid(DIP(dp, di_uid),
					    USRQUOTA, NULL);
					fup->fu_curinodes++;
					if (mode == IFREG || mode == IFDIR ||
					    mode == IFLNK)
						fup->fu_curblocks +=
						    DIP(dp, di_blocks);
				}
			}
		}
		freeinodebuf();
		if (flags & (CHECK_DEBUG | CHECK_VERBOSE)) {
			(void)printf("*** Checking ");
			if (qnp->flags & HASUSR) {
				(void)printf("%s", qfextension[USRQUOTA]);
				if (qnp->flags & HASGRP)
					(void)printf(" and ");
			}
			if (qnp->flags & HASGRP)
				(void)printf("%s", qfextension[GRPQUOTA]);
			(void)printf(" quotas for %s (%s), %swait\n",
			    fsname, mntpt, pidp ? "no" : "");
		}
		if (qnp->flags & HASUSR)
			errs += update(mntpt, qnp->usrqfname, USRQUOTA);
		if (qnp->flags & HASGRP)
			errs += update(mntpt, qnp->grpqfname, GRPQUOTA);
		close(fi);
		exit(errs);
		break;
	default:	/* parent */
		if (pidp != NULL) {
			*pidp = pid;
			return 0;
		}
		if (waitpid(pid, &status, 0) < 0) {
			warn("waitpid");
			return 1;
		}
		if (WIFEXITED(status)) {
			if (WEXITSTATUS(status) != 0)
				return WEXITSTATUS(status);
		} else if (WIFSIGNALED(status)) {
			warnx("%s: %s", fsname,
			    strsignal(WTERMSIG(status)));
			return 1;
		}
		break;
	}
	return (0);
}
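
/*
 * Illustrative sketch (not part of quotacheck): the parent branch of the
 * chkquota() variant above supports two calling modes.  With a non-NULL
 * pidp the child's pid is handed back and the caller reaps it later; with a
 * NULL pidp the call is synchronous and the child's exit status is
 * propagated.  The fragment below shows how a caller might drive both
 * modes; check_all(), struct fsent, the fs[] table and MAXCHECKS are
 * hypothetical names introduced only for this sketch.
 */
#include <sys/types.h>
#include <sys/wait.h>

#define	MAXCHECKS	16		/* assumed small upper bound */

struct fsent {				/* hypothetical descriptor */
	const char *vfstype, *fsname, *mntpt;
	void *qnp;			/* struct quotaname * for this fs */
};

static int
check_all(struct fsent *fs, int nfs, int parallel)
{
	pid_t pids[MAXCHECKS];
	int i, errs = 0, status;

	for (i = 0; i < nfs && i < MAXCHECKS; i++) {
		if (parallel)
			/* Child pid is stored; chkquota() returns at once. */
			errs += chkquota(fs[i].vfstype, fs[i].fsname,
			    fs[i].mntpt, fs[i].qnp, &pids[i]);
		else
			/* NULL pidp: chkquota() waits for the child itself. */
			errs += chkquota(fs[i].vfstype, fs[i].fsname,
			    fs[i].mntpt, fs[i].qnp, NULL);
	}
	if (parallel) {
		/* Reap the children started above and collect exit codes. */
		for (i = 0; i < nfs && i < MAXCHECKS; i++) {
			if (waitpid(pids[i], &status, 0) != -1 &&
			    WIFEXITED(status))
				errs += WEXITSTATUS(status);
		}
	}
	return (errs);
}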