/*
 * do_write()
 *	Local routine to loop over a buffer and write it to a file
 *
 * Returns 0 on success, 1 on error.
 */
static int
do_write(struct openfile *o, int pos, char *buf, int cnt)
{
        uint x, step, blk, boff;
        char *blkp;

        /*
         * Loop across each block, putting our data into place
         */
        for (x = 0; x < cnt; x += step) {
                /*
                 * Calculate how much to take out of current block
                 */
                blkoff(pos, &boff, &step);
                if (step > (cnt - x)) {
                        step = (cnt - x);
                }

                /*
                 * Get next block.  If not present yet, allocate
                 * and add to the file.
                 */
                blk = blknum(pos);
                blkp = hash_lookup(o->o_blocks, blk);
                if (blkp == 0) {
                        uint sz;

                        sz = (blk ? BLOCKSIZE : BLOCKMIN);
                        blkp = malloc(sz);
                        if (blkp == 0) {
                                return(1);
                        }
                        bzero(blkp, sz);
                        if (hash_insert(o->o_blocks, blk, blkp)) {
                                free(blkp);
                                return(1);
                        }
                }

                /*
                 * Put contents into block
                 */
                memcpy(blkp+boff, buf+x, step);

                /*
                 * Advance to next chunk
                 */
                pos += step;
        }
        return(0);
}
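/*
 * A minimal standalone sketch of the blknum()/blkoff() decomposition that
 * do_write() relies on: a byte position splits into a block index plus an
 * offset and a "step" (bytes left in that block).  The helper names and the
 * 4K SKETCH_BLOCKSIZE below are assumptions for illustration only; the real
 * tmpfs macros and block size may differ.
 */
#include <stdio.h>

#define SKETCH_BLOCKSIZE 4096u          /* assumed block size for the sketch */

/* Block index that holds byte position "pos". */
static unsigned int
sketch_blknum(unsigned int pos)
{
        return (pos / SKETCH_BLOCKSIZE);
}

/*
 * Offset of "pos" within its block, plus how many bytes remain in the block
 * from that offset -- the "step" that bounds each copy in do_write().
 */
static void
sketch_blkoff(unsigned int pos, unsigned int *off, unsigned int *step)
{
        *off = pos % SKETCH_BLOCKSIZE;
        *step = SKETCH_BLOCKSIZE - *off;
}

int
main(void)
{
        unsigned int off, step;

        sketch_blkoff(5000, &off, &step);
        printf("pos 5000 -> block %u, offset %u, %u bytes left in block\n",
            sketch_blknum(5000), off, step);    /* block 1, offset 904, 3192 left */
        return (0);
}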
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
{
        struct cg *cgp;
        daddr_t blkno;
        int32_t bno;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);
        u_int8_t *blksfree_swap;

        cgp = (struct cg *)bp->b_data;
        blksfree_swap = cg_blksfree_swap(cgp, needswap);
        if (bpref == 0 || (uint32_t)dtog(fs, bpref) !=
            ufs_rw32(cgp->cg_cgx, needswap)) {
                bpref = ufs_rw32(cgp->cg_rotor, needswap);
        } else {
                bpref = blknum(fs, bpref);
                bno = dtogd(fs, bpref);
                /*
                 * if the requested block is available, use it
                 */
                if (ffs_isblock(fs, blksfree_swap, fragstoblks(fs, bno)))
                        goto gotit;
        }
        /*
         * Take the next available one in this cylinder group.
         */
        bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
        if (bno < 0)
                return (0);
        cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
        blkno = fragstoblks(fs, bno);
        ffs_clrblock(fs, blksfree_swap, (long)blkno);
        ffs_clusteracct(fs, cgp, blkno, -1);
        ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
        fs->fs_cstotal.cs_nbfree--;
        fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
        fs->fs_fmod = 1;
        blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
        return (blkno);
}
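/*
 * A condensed sketch of the allocation policy above: take the preferred
 * block if it is still free, otherwise scan from the per-group rotor.  The
 * names (alloc_block, blksfree_toy, rotor_toy) and the toy free map are
 * invented, and the rotational-layout case 2 is omitted; this only makes the
 * control flow explicit, it is not the FFS routine itself.
 */
#include <stdio.h>

#define TOY_NBLOCKS 16

/* 1 = free, 0 = allocated; a stand-in for the cylinder group's free map. */
static int blksfree_toy[TOY_NBLOCKS] =
    { 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
static int rotor_toy;                   /* plays the role of cg_rotor */

/*
 * Honor the preferred block when it is free, otherwise take the next free
 * block at or after the rotor and advance the rotor.  Returns -1 when full.
 */
static int
alloc_block(int bpref)
{
        int i, bno;

        if (bpref >= 0 && bpref < TOY_NBLOCKS && blksfree_toy[bpref]) {
                bno = bpref;
        } else {
                for (i = 0; i < TOY_NBLOCKS; i++) {
                        bno = (rotor_toy + i) % TOY_NBLOCKS;
                        if (blksfree_toy[bno])
                                break;
                }
                if (i == TOY_NBLOCKS)
                        return (-1);
                rotor_toy = bno;
        }
        blksfree_toy[bno] = 0;          /* mark it allocated */
        return (bno);
}

int
main(void)
{
        printf("prefer 4 -> %d\n", alloc_block(4));     /* 4: still free */
        printf("prefer 4 -> %d\n", alloc_block(4));     /* 1: falls back to rotor */
        return (0);
}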
void mkfs(struct partition *pp, char *fsys) { int fragsperinode, optimalfpg, origdensity, minfpg, lastminfpg; long i, j, csfrags; uint cg; time_t utime; quad_t sizepb; int width; ino_t maxinum; int minfragsperinode; /* minimum ratio of frags to inodes */ char tmpbuf[100]; /* XXX this will break in about 2,500 years */ union { struct fs fdummy; char cdummy[SBLOCKSIZE]; } dummy; #define fsdummy dummy.fdummy #define chdummy dummy.cdummy /* * Our blocks == sector size, and the version of UFS we are using is * specified by Oflag. */ disk.d_bsize = sectorsize; disk.d_ufs = Oflag; if (Rflag) { utime = 1000000000; } else { time(&utime); arc4random_stir(); } sblock.fs_old_flags = FS_FLAGS_UPDATED; sblock.fs_flags = 0; if (Uflag) sblock.fs_flags |= FS_DOSOFTDEP; if (Lflag) strlcpy(sblock.fs_volname, volumelabel, MAXVOLLEN); if (Jflag) sblock.fs_flags |= FS_GJOURNAL; if (lflag) sblock.fs_flags |= FS_MULTILABEL; if (tflag) sblock.fs_flags |= FS_TRIM; /* * Validate the given file system size. * Verify that its last block can actually be accessed. * Convert to file system fragment sized units. */ if (fssize <= 0) { printf("preposterous size %jd\n", (intmax_t)fssize); exit(13); } wtfs(fssize - (realsectorsize / DEV_BSIZE), realsectorsize, (char *)&sblock); /* * collect and verify the file system density info */ sblock.fs_avgfilesize = avgfilesize; sblock.fs_avgfpdir = avgfilesperdir; if (sblock.fs_avgfilesize <= 0) printf("illegal expected average file size %d\n", sblock.fs_avgfilesize), exit(14); if (sblock.fs_avgfpdir <= 0) printf("illegal expected number of files per directory %d\n", sblock.fs_avgfpdir), exit(15); restart: /* * collect and verify the block and fragment sizes */ sblock.fs_bsize = bsize; sblock.fs_fsize = fsize; if (!POWEROF2(sblock.fs_bsize)) { printf("block size must be a power of 2, not %d\n", sblock.fs_bsize); exit(16); } if (!POWEROF2(sblock.fs_fsize)) { printf("fragment size must be a power of 2, not %d\n", sblock.fs_fsize); exit(17); } if (sblock.fs_fsize < sectorsize) { printf("increasing fragment size from %d to sector size (%d)\n", sblock.fs_fsize, sectorsize); sblock.fs_fsize = sectorsize; } if (sblock.fs_bsize > MAXBSIZE) { printf("decreasing block size from %d to maximum (%d)\n", sblock.fs_bsize, MAXBSIZE); sblock.fs_bsize = MAXBSIZE; } if (sblock.fs_bsize < MINBSIZE) { printf("increasing block size from %d to minimum (%d)\n", sblock.fs_bsize, MINBSIZE); sblock.fs_bsize = MINBSIZE; } if (sblock.fs_fsize > MAXBSIZE) { printf("decreasing fragment size from %d to maximum (%d)\n", sblock.fs_fsize, MAXBSIZE); sblock.fs_fsize = MAXBSIZE; } if (sblock.fs_bsize < sblock.fs_fsize) { printf("increasing block size from %d to fragment size (%d)\n", sblock.fs_bsize, sblock.fs_fsize); sblock.fs_bsize = sblock.fs_fsize; } if (sblock.fs_fsize * MAXFRAG < sblock.fs_bsize) { printf( "increasing fragment size from %d to block size / %d (%d)\n", sblock.fs_fsize, MAXFRAG, sblock.fs_bsize / MAXFRAG); sblock.fs_fsize = sblock.fs_bsize / MAXFRAG; } if (maxbsize == 0) maxbsize = bsize; if (maxbsize < bsize || !POWEROF2(maxbsize)) { sblock.fs_maxbsize = sblock.fs_bsize; printf("Extent size set to %d\n", sblock.fs_maxbsize); } else if (sblock.fs_maxbsize > FS_MAXCONTIG * sblock.fs_bsize) { sblock.fs_maxbsize = FS_MAXCONTIG * sblock.fs_bsize; printf("Extent size reduced to %d\n", sblock.fs_maxbsize); } else { sblock.fs_maxbsize = maxbsize; } /* * Maxcontig sets the default for the maximum number of blocks * that may be allocated sequentially. 
With file system clustering * it is possible to allocate contiguous blocks up to the maximum * transfer size permitted by the controller or buffering. */ if (maxcontig == 0) maxcontig = MAX(1, MAXPHYS / bsize); sblock.fs_maxcontig = maxcontig; if (sblock.fs_maxcontig < sblock.fs_maxbsize / sblock.fs_bsize) { sblock.fs_maxcontig = sblock.fs_maxbsize / sblock.fs_bsize; printf("Maxcontig raised to %d\n", sblock.fs_maxbsize); } if (sblock.fs_maxcontig > 1) sblock.fs_contigsumsize = MIN(sblock.fs_maxcontig,FS_MAXCONTIG); sblock.fs_bmask = ~(sblock.fs_bsize - 1); sblock.fs_fmask = ~(sblock.fs_fsize - 1); sblock.fs_qbmask = ~sblock.fs_bmask; sblock.fs_qfmask = ~sblock.fs_fmask; sblock.fs_bshift = ilog2(sblock.fs_bsize); sblock.fs_fshift = ilog2(sblock.fs_fsize); sblock.fs_frag = numfrags(&sblock, sblock.fs_bsize); sblock.fs_fragshift = ilog2(sblock.fs_frag); if (sblock.fs_frag > MAXFRAG) { printf("fragment size %d is still too small (can't happen)\n", sblock.fs_bsize / MAXFRAG); exit(21); } sblock.fs_fsbtodb = ilog2(sblock.fs_fsize / sectorsize); sblock.fs_size = fssize = dbtofsb(&sblock, fssize); sblock.fs_providersize = dbtofsb(&sblock, mediasize / sectorsize); /* * Before the filesystem is finally initialized, mark it * as incompletely initialized. */ sblock.fs_magic = FS_BAD_MAGIC; if (Oflag == 1) { sblock.fs_sblockloc = SBLOCK_UFS1; sblock.fs_nindir = sblock.fs_bsize / sizeof(ufs1_daddr_t); sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs1_dinode); sblock.fs_maxsymlinklen = ((NDADDR + NIADDR) * sizeof(ufs1_daddr_t)); sblock.fs_old_inodefmt = FS_44INODEFMT; sblock.fs_old_cgoffset = 0; sblock.fs_old_cgmask = 0xffffffff; sblock.fs_old_size = sblock.fs_size; sblock.fs_old_rotdelay = 0; sblock.fs_old_rps = 60; sblock.fs_old_nspf = sblock.fs_fsize / sectorsize; sblock.fs_old_cpg = 1; sblock.fs_old_interleave = 1; sblock.fs_old_trackskew = 0; sblock.fs_old_cpc = 0; sblock.fs_old_postblformat = 1; sblock.fs_old_nrpos = 1; } else { sblock.fs_sblockloc = SBLOCK_UFS2; sblock.fs_nindir = sblock.fs_bsize / sizeof(ufs2_daddr_t); sblock.fs_inopb = sblock.fs_bsize / sizeof(struct ufs2_dinode); sblock.fs_maxsymlinklen = ((NDADDR + NIADDR) * sizeof(ufs2_daddr_t)); } sblock.fs_sblkno = roundup(howmany(sblock.fs_sblockloc + SBLOCKSIZE, sblock.fs_fsize), sblock.fs_frag); sblock.fs_cblkno = sblock.fs_sblkno + roundup(howmany(SBLOCKSIZE, sblock.fs_fsize), sblock.fs_frag); sblock.fs_iblkno = sblock.fs_cblkno + sblock.fs_frag; sblock.fs_maxfilesize = sblock.fs_bsize * NDADDR - 1; for (sizepb = sblock.fs_bsize, i = 0; i < NIADDR; i++) { sizepb *= NINDIR(&sblock); sblock.fs_maxfilesize += sizepb; } /* * It's impossible to create a snapshot in case that fs_maxfilesize * is smaller than the fssize. */ if (sblock.fs_maxfilesize < (u_quad_t)fssize) { warnx("WARNING: You will be unable to create snapshots on this " "file system. Correct by using a larger blocksize."); } /* * Calculate the number of blocks to put into each cylinder group. * * This algorithm selects the number of blocks per cylinder * group. The first goal is to have at least enough data blocks * in each cylinder group to meet the density requirement. Once * this goal is achieved we try to expand to have at least * MINCYLGRPS cylinder groups. Once this goal is achieved, we * pack as many blocks into each cylinder group map as will fit. * * We start by calculating the smallest number of blocks that we * can put into each cylinder group. If this is too big, we reduce * the density until it fits. 
*/ maxinum = (((int64_t)(1)) << 32) - INOPB(&sblock); minfragsperinode = 1 + fssize / maxinum; if (density == 0) { density = MAX(NFPI, minfragsperinode) * fsize; } else if (density < minfragsperinode * fsize) { origdensity = density; density = minfragsperinode * fsize; fprintf(stderr, "density increased from %d to %d\n", origdensity, density); } origdensity = density; for (;;) { fragsperinode = MAX(numfrags(&sblock, density), 1); if (fragsperinode < minfragsperinode) { bsize <<= 1; fsize <<= 1; printf("Block size too small for a file system %s %d\n", "of this size. Increasing blocksize to", bsize); goto restart; } minfpg = fragsperinode * INOPB(&sblock); if (minfpg > sblock.fs_size) minfpg = sblock.fs_size; sblock.fs_ipg = INOPB(&sblock); sblock.fs_fpg = roundup(sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag); if (sblock.fs_fpg < minfpg) sblock.fs_fpg = minfpg; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); sblock.fs_fpg = roundup(sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag); if (sblock.fs_fpg < minfpg) sblock.fs_fpg = minfpg; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize) break; density -= sblock.fs_fsize; } if (density != origdensity) printf("density reduced from %d to %d\n", origdensity, density); /* * Start packing more blocks into the cylinder group until * it cannot grow any larger, the number of cylinder groups * drops below MINCYLGRPS, or we reach the size requested. * For UFS1 inodes per cylinder group are stored in an int16_t * so fs_ipg is limited to 2^15 - 1. */ for ( ; sblock.fs_fpg < maxblkspercg; sblock.fs_fpg += sblock.fs_frag) { sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); if (Oflag > 1 || (Oflag == 1 && sblock.fs_ipg <= 0x7fff)) { if (sblock.fs_size / sblock.fs_fpg < MINCYLGRPS) break; if (CGSIZE(&sblock) < (unsigned long)sblock.fs_bsize) continue; if (CGSIZE(&sblock) == (unsigned long)sblock.fs_bsize) break; } sblock.fs_fpg -= sblock.fs_frag; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); break; } /* * Check to be sure that the last cylinder group has enough blocks * to be viable. If it is too small, reduce the number of blocks * per cylinder group which will have the effect of moving more * blocks into the last cylinder group. 
*/ optimalfpg = sblock.fs_fpg; for (;;) { sblock.fs_ncg = howmany(sblock.fs_size, sblock.fs_fpg); lastminfpg = roundup(sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock), sblock.fs_frag); if (sblock.fs_size < lastminfpg) { printf("Filesystem size %jd < minimum size of %d\n", (intmax_t)sblock.fs_size, lastminfpg); exit(28); } if (sblock.fs_size % sblock.fs_fpg >= lastminfpg || sblock.fs_size % sblock.fs_fpg == 0) break; sblock.fs_fpg -= sblock.fs_frag; sblock.fs_ipg = roundup(howmany(sblock.fs_fpg, fragsperinode), INOPB(&sblock)); } if (optimalfpg != sblock.fs_fpg) printf("Reduced frags per cylinder group from %d to %d %s\n", optimalfpg, sblock.fs_fpg, "to enlarge last cyl group"); sblock.fs_cgsize = fragroundup(&sblock, CGSIZE(&sblock)); sblock.fs_dblkno = sblock.fs_iblkno + sblock.fs_ipg / INOPF(&sblock); if (Oflag == 1) { sblock.fs_old_spc = sblock.fs_fpg * sblock.fs_old_nspf; sblock.fs_old_nsect = sblock.fs_old_spc; sblock.fs_old_npsect = sblock.fs_old_spc; sblock.fs_old_ncyl = sblock.fs_ncg; } /* * fill in remaining fields of the super block */ sblock.fs_csaddr = cgdmin(&sblock, 0); sblock.fs_cssize = fragroundup(&sblock, sblock.fs_ncg * sizeof(struct csum)); fscs = (struct csum *)calloc(1, sblock.fs_cssize); if (fscs == NULL) errx(31, "calloc failed"); sblock.fs_sbsize = fragroundup(&sblock, sizeof(struct fs)); if (sblock.fs_sbsize > SBLOCKSIZE) sblock.fs_sbsize = SBLOCKSIZE; sblock.fs_minfree = minfree; if (metaspace > 0 && metaspace < sblock.fs_fpg / 2) sblock.fs_metaspace = blknum(&sblock, metaspace); else if (metaspace != -1) /* reserve half of minfree for metadata blocks */ sblock.fs_metaspace = blknum(&sblock, (sblock.fs_fpg * minfree) / 200); if (maxbpg == 0) sblock.fs_maxbpg = MAXBLKPG(sblock.fs_bsize); else sblock.fs_maxbpg = maxbpg; sblock.fs_optim = opt; sblock.fs_cgrotor = 0; sblock.fs_pendingblocks = 0; sblock.fs_pendinginodes = 0; sblock.fs_fmod = 0; sblock.fs_ronly = 0; sblock.fs_state = 0; sblock.fs_clean = 1; sblock.fs_id[0] = (long)utime; sblock.fs_id[1] = newfs_random(); sblock.fs_fsmnt[0] = '\0'; csfrags = howmany(sblock.fs_cssize, sblock.fs_fsize); sblock.fs_dsize = sblock.fs_size - sblock.fs_sblkno - sblock.fs_ncg * (sblock.fs_dblkno - sblock.fs_sblkno); sblock.fs_cstotal.cs_nbfree = fragstoblks(&sblock, sblock.fs_dsize) - howmany(csfrags, sblock.fs_frag); sblock.fs_cstotal.cs_nffree = fragnum(&sblock, sblock.fs_size) + (fragnum(&sblock, csfrags) > 0 ? sblock.fs_frag - fragnum(&sblock, csfrags) : 0); sblock.fs_cstotal.cs_nifree = sblock.fs_ncg * sblock.fs_ipg - ROOTINO; sblock.fs_cstotal.cs_ndir = 0; sblock.fs_dsize -= csfrags; sblock.fs_time = utime; if (Oflag == 1) { sblock.fs_old_time = utime; sblock.fs_old_dsize = sblock.fs_dsize; sblock.fs_old_csaddr = sblock.fs_csaddr; sblock.fs_old_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir; sblock.fs_old_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree; sblock.fs_old_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree; sblock.fs_old_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree; } /* * Dump out summary information about file system. 
*/ # define B2MBFACTOR (1 / (1024.0 * 1024.0)) printf("%s: %.1fMB (%jd sectors) block size %d, fragment size %d\n", fsys, (float)sblock.fs_size * sblock.fs_fsize * B2MBFACTOR, (intmax_t)fsbtodb(&sblock, sblock.fs_size), sblock.fs_bsize, sblock.fs_fsize); printf("\tusing %d cylinder groups of %.2fMB, %d blks, %d inodes.\n", sblock.fs_ncg, (float)sblock.fs_fpg * sblock.fs_fsize * B2MBFACTOR, sblock.fs_fpg / sblock.fs_frag, sblock.fs_ipg); if (sblock.fs_flags & FS_DOSOFTDEP) printf("\twith soft updates\n"); # undef B2MBFACTOR if (Eflag && !Nflag) { printf("Erasing sectors [%jd...%jd]\n", sblock.fs_sblockloc / disk.d_bsize, fsbtodb(&sblock, sblock.fs_size) - 1); berase(&disk, sblock.fs_sblockloc / disk.d_bsize, sblock.fs_size * sblock.fs_fsize - sblock.fs_sblockloc); } /* * Wipe out old UFS1 superblock(s) if necessary. */ if (!Nflag && Oflag != 1) { i = bread(&disk, part_ofs + SBLOCK_UFS1 / disk.d_bsize, chdummy, SBLOCKSIZE); if (i == -1) err(1, "can't read old UFS1 superblock: %s", disk.d_error); if (fsdummy.fs_magic == FS_UFS1_MAGIC) { fsdummy.fs_magic = 0; bwrite(&disk, part_ofs + SBLOCK_UFS1 / disk.d_bsize, chdummy, SBLOCKSIZE); for (cg = 0; cg < fsdummy.fs_ncg; cg++) { if (fsbtodb(&fsdummy, cgsblock(&fsdummy, cg)) > fssize) break; bwrite(&disk, part_ofs + fsbtodb(&fsdummy, cgsblock(&fsdummy, cg)), chdummy, SBLOCKSIZE); } } } if (!Nflag) do_sbwrite(&disk); if (Xflag == 1) { printf("** Exiting on Xflag 1\n"); exit(0); } if (Xflag == 2) printf("** Leaving BAD MAGIC on Xflag 2\n"); else sblock.fs_magic = (Oflag != 1) ? FS_UFS2_MAGIC : FS_UFS1_MAGIC; /* * Now build the cylinders group blocks and * then print out indices of cylinder groups. */ printf("super-block backups (for fsck -b #) at:\n"); i = 0; width = charsperline(); /* * allocate space for superblock, cylinder group map, and * two sets of inode blocks. */ if (sblock.fs_bsize < SBLOCKSIZE) iobufsize = SBLOCKSIZE + 3 * sblock.fs_bsize; else iobufsize = 4 * sblock.fs_bsize; if ((iobuf = calloc(1, iobufsize)) == 0) { printf("Cannot allocate I/O buffer\n"); exit(38); } /* * Make a copy of the superblock into the buffer that we will be * writing out in each cylinder group. */ bcopy((char *)&sblock, iobuf, SBLOCKSIZE); for (cg = 0; cg < sblock.fs_ncg; cg++) { initcg(cg, utime); j = snprintf(tmpbuf, sizeof(tmpbuf), " %jd%s", (intmax_t)fsbtodb(&sblock, cgsblock(&sblock, cg)), cg < (sblock.fs_ncg-1) ? "," : ""); if (j < 0) tmpbuf[j = 0] = '\0'; if (i + j >= width) { printf("\n"); i = 0; } i += j; printf("%s", tmpbuf); fflush(stdout); } printf("\n"); if (Nflag) exit(0); /* * Now construct the initial file system, * then write out the super-block. */ fsinit(utime); if (Oflag == 1) { sblock.fs_old_cstotal.cs_ndir = sblock.fs_cstotal.cs_ndir; sblock.fs_old_cstotal.cs_nbfree = sblock.fs_cstotal.cs_nbfree; sblock.fs_old_cstotal.cs_nifree = sblock.fs_cstotal.cs_nifree; sblock.fs_old_cstotal.cs_nffree = sblock.fs_cstotal.cs_nffree; } if (Xflag == 3) { printf("** Exiting on Xflag 3\n"); exit(0); } if (!Nflag) { do_sbwrite(&disk); /* * For UFS1 filesystems with a blocksize of 64K, the first * alternate superblock resides at the location used for * the default UFS2 superblock. As there is a valid * superblock at this location, the boot code will use * it as its first choice. Thus we have to ensure that * all of its statistcs on usage are correct. 
	 */
                if (Oflag == 1 && sblock.fs_bsize == 65536)
                        wtfs(fsbtodb(&sblock, cgsblock(&sblock, 0)),
                            sblock.fs_bsize, (char *)&sblock);
        }
        for (i = 0; i < sblock.fs_cssize; i += sblock.fs_bsize)
                wtfs(fsbtodb(&sblock, sblock.fs_csaddr + numfrags(&sblock, i)),
                    sblock.fs_cssize - i < sblock.fs_bsize ?
                    sblock.fs_cssize - i : sblock.fs_bsize,
                    ((char *)fscs) + i);
        /*
         * Update information about this partition in pack
         * label, so that it may be updated on disk.
         */
        if (pp != NULL) {
                pp->p_fstype = FS_BSDFFS;
                pp->p_fsize = sblock.fs_fsize;
                pp->p_frag = sblock.fs_frag;
                pp->p_cpg = sblock.fs_fpg;
        }
}
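/*
 * A worked sketch of the derived superblock geometry fields mkfs computes
 * from the block and fragment sizes (fs_bshift = ilog2(fs_bsize), fs_bmask =
 * ~(fs_bsize - 1), fs_frag = fragments per block, and so on), evaluated for
 * an assumed 32K/4K block/fragment pair.  ilog2_sketch is a local stand-in
 * for the mkfs helper and the sizes are illustrative, not defaults.
 */
#include <stdio.h>
#include <stdint.h>

/* Integer log2 of a power of two. */
static int
ilog2_sketch(uint32_t v)
{
        int s = 0;

        while (v > 1) {
                v >>= 1;
                s++;
        }
        return (s);
}

int
main(void)
{
        uint32_t bsize = 32768, fsize = 4096;   /* assumed sizes for the example */

        printf("fs_bshift    = %d\n", ilog2_sketch(bsize));            /* 15 */
        printf("fs_fshift    = %d\n", ilog2_sketch(fsize));            /* 12 */
        printf("fs_bmask     = 0x%x\n", ~(bsize - 1));                 /* 0xffff8000 */
        printf("fs_frag      = %u\n", bsize / fsize);                  /* 8 frags/block */
        printf("fs_fragshift = %d\n", ilog2_sketch(bsize / fsize));    /* 3 */
        return (0);
}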
int main(int argc, char *argv[]) { const char *avalue, *jvalue, *Jvalue, *Lvalue, *lvalue, *Nvalue, *nvalue; const char *tvalue; const char *special, *on; const char *name; int active; int Aflag, aflag, eflag, evalue, fflag, fvalue, jflag, Jflag, kflag; int kvalue, Lflag, lflag, mflag, mvalue, Nflag, nflag, oflag, ovalue; int pflag, sflag, svalue, Svalue, tflag; int ch, found_arg, i; const char *chg[2]; struct ufs_args args; struct statfs stfs; if (argc < 3) usage(); Aflag = aflag = eflag = fflag = jflag = Jflag = kflag = Lflag = 0; lflag = mflag = Nflag = nflag = oflag = pflag = sflag = tflag = 0; avalue = jvalue = Jvalue = Lvalue = lvalue = Nvalue = nvalue = NULL; evalue = fvalue = mvalue = ovalue = svalue = Svalue = 0; active = 0; found_arg = 0; /* At least one arg is required. */ while ((ch = getopt(argc, argv, "Aa:e:f:j:J:k:L:l:m:N:n:o:ps:S:t:")) != -1) switch (ch) { case 'A': found_arg = 1; Aflag++; break; case 'a': found_arg = 1; name = "POSIX.1e ACLs"; avalue = optarg; if (strcmp(avalue, "enable") && strcmp(avalue, "disable")) { errx(10, "bad %s (options are %s)", name, "`enable' or `disable'"); } aflag = 1; break; case 'e': found_arg = 1; name = "maximum blocks per file in a cylinder group"; evalue = atoi(optarg); if (evalue < 1) errx(10, "%s must be >= 1 (was %s)", name, optarg); eflag = 1; break; case 'f': found_arg = 1; name = "average file size"; fvalue = atoi(optarg); if (fvalue < 1) errx(10, "%s must be >= 1 (was %s)", name, optarg); fflag = 1; break; case 'j': found_arg = 1; name = "softdep journaled file system"; jvalue = optarg; if (strcmp(jvalue, "enable") && strcmp(jvalue, "disable")) { errx(10, "bad %s (options are %s)", name, "`enable' or `disable'"); } jflag = 1; break; case 'J': found_arg = 1; name = "gjournaled file system"; Jvalue = optarg; if (strcmp(Jvalue, "enable") && strcmp(Jvalue, "disable")) { errx(10, "bad %s (options are %s)", name, "`enable' or `disable'"); } Jflag = 1; break; case 'k': found_arg = 1; name = "space to hold for metadata blocks"; kvalue = atoi(optarg); if (kvalue < 0) errx(10, "bad %s (%s)", name, optarg); kflag = 1; break; case 'L': found_arg = 1; name = "volume label"; Lvalue = optarg; i = -1; while (isalnum(Lvalue[++i])); if (Lvalue[i] != '\0') { errx(10, "bad %s. Valid characters are alphanumerics.", name); } if (strlen(Lvalue) >= MAXVOLLEN) { errx(10, "bad %s. 
Length is longer than %d.", name, MAXVOLLEN - 1); } Lflag = 1; break; case 'l': found_arg = 1; name = "multilabel MAC file system"; lvalue = optarg; if (strcmp(lvalue, "enable") && strcmp(lvalue, "disable")) { errx(10, "bad %s (options are %s)", name, "`enable' or `disable'"); } lflag = 1; break; case 'm': found_arg = 1; name = "minimum percentage of free space"; mvalue = atoi(optarg); if (mvalue < 0 || mvalue > 99) errx(10, "bad %s (%s)", name, optarg); mflag = 1; break; case 'N': found_arg = 1; name = "NFSv4 ACLs"; Nvalue = optarg; if (strcmp(Nvalue, "enable") && strcmp(Nvalue, "disable")) { errx(10, "bad %s (options are %s)", name, "`enable' or `disable'"); } Nflag = 1; break; case 'n': found_arg = 1; name = "soft updates"; nvalue = optarg; if (strcmp(nvalue, "enable") != 0 && strcmp(nvalue, "disable") != 0) { errx(10, "bad %s (options are %s)", name, "`enable' or `disable'"); } nflag = 1; break; case 'o': found_arg = 1; name = "optimization preference"; if (strcmp(optarg, "space") == 0) ovalue = FS_OPTSPACE; else if (strcmp(optarg, "time") == 0) ovalue = FS_OPTTIME; else errx(10, "bad %s (options are `space' or `time')", name); oflag = 1; break; case 'p': found_arg = 1; pflag = 1; break; case 's': found_arg = 1; name = "expected number of files per directory"; svalue = atoi(optarg); if (svalue < 1) errx(10, "%s must be >= 1 (was %s)", name, optarg); sflag = 1; break; case 'S': found_arg = 1; name = "Softdep Journal Size"; Svalue = atoi(optarg); if (Svalue < SUJ_MIN) errx(10, "%s must be >= %d (was %s)", name, SUJ_MIN, optarg); break; case 't': found_arg = 1; name = "trim"; tvalue = optarg; if (strcmp(tvalue, "enable") != 0 && strcmp(tvalue, "disable") != 0) { errx(10, "bad %s (options are %s)", name, "`enable' or `disable'"); } tflag = 1; break; default: usage(); } argc -= optind; argv += optind; if (found_arg == 0 || argc != 1) usage(); on = special = argv[0]; if (ufs_disk_fillout(&disk, special) == -1) goto err; if (disk.d_name != special) { if (statfs(special, &stfs) != 0) warn("Can't stat %s", special); if (strcmp(special, stfs.f_mntonname) == 0) active = 1; } if (pflag) { printfs(); exit(0); } if (Lflag) { name = "volume label"; strlcpy(sblock.fs_volname, Lvalue, MAXVOLLEN); } if (aflag) { name = "POSIX.1e ACLs"; if (strcmp(avalue, "enable") == 0) { if (sblock.fs_flags & FS_ACLS) { warnx("%s remains unchanged as enabled", name); } else if (sblock.fs_flags & FS_NFS4ACLS) { warnx("%s and NFSv4 ACLs are mutually " "exclusive", name); } else { sblock.fs_flags |= FS_ACLS; warnx("%s set", name); } } else if (strcmp(avalue, "disable") == 0) { if ((~sblock.fs_flags & FS_ACLS) == FS_ACLS) { warnx("%s remains unchanged as disabled", name); } else { sblock.fs_flags &= ~FS_ACLS; warnx("%s cleared", name); } } } if (eflag) { name = "maximum blocks per file in a cylinder group"; if (sblock.fs_maxbpg == evalue) warnx("%s remains unchanged as %d", name, evalue); else { warnx("%s changes from %d to %d", name, sblock.fs_maxbpg, evalue); sblock.fs_maxbpg = evalue; } } if (fflag) { name = "average file size"; if (sblock.fs_avgfilesize == (unsigned)fvalue) { warnx("%s remains unchanged as %d", name, fvalue); } else { warnx("%s changes from %d to %d", name, sblock.fs_avgfilesize, fvalue); sblock.fs_avgfilesize = fvalue; } } if (jflag) { name = "soft updates journaling"; if (strcmp(jvalue, "enable") == 0) { if ((sblock.fs_flags & (FS_DOSOFTDEP | FS_SUJ)) == (FS_DOSOFTDEP | FS_SUJ)) { warnx("%s remains unchanged as enabled", name); } else if (sblock.fs_clean == 0) { warnx("%s cannot be enabled until fsck 
is run", name); } else if (journal_alloc(Svalue) != 0) { warnx("%s can not be enabled", name); } else { sblock.fs_flags |= FS_DOSOFTDEP | FS_SUJ; warnx("%s set", name); } } else if (strcmp(jvalue, "disable") == 0) { if ((~sblock.fs_flags & FS_SUJ) == FS_SUJ) { warnx("%s remains unchanged as disabled", name); } else { journal_clear(); sblock.fs_flags &= ~FS_SUJ; sblock.fs_sujfree = 0; warnx("%s cleared but soft updates still set.", name); warnx("remove .sujournal to reclaim space"); } } } if (Jflag) { name = "gjournal"; if (strcmp(Jvalue, "enable") == 0) { if (sblock.fs_flags & FS_GJOURNAL) { warnx("%s remains unchanged as enabled", name); } else { sblock.fs_flags |= FS_GJOURNAL; warnx("%s set", name); } } else if (strcmp(Jvalue, "disable") == 0) { if ((~sblock.fs_flags & FS_GJOURNAL) == FS_GJOURNAL) { warnx("%s remains unchanged as disabled", name); } else { sblock.fs_flags &= ~FS_GJOURNAL; warnx("%s cleared", name); } } } if (kflag) { name = "space to hold for metadata blocks"; if (sblock.fs_metaspace == kvalue) warnx("%s remains unchanged as %d", name, kvalue); else { kvalue = blknum(&sblock, kvalue); if (kvalue > sblock.fs_fpg / 2) { kvalue = blknum(&sblock, sblock.fs_fpg / 2); warnx("%s cannot exceed half the file system " "space", name); } warnx("%s changes from %jd to %d", name, sblock.fs_metaspace, kvalue); sblock.fs_metaspace = kvalue; } } if (lflag) { name = "multilabel"; if (strcmp(lvalue, "enable") == 0) { if (sblock.fs_flags & FS_MULTILABEL) { warnx("%s remains unchanged as enabled", name); } else { sblock.fs_flags |= FS_MULTILABEL; warnx("%s set", name); } } else if (strcmp(lvalue, "disable") == 0) { if ((~sblock.fs_flags & FS_MULTILABEL) == FS_MULTILABEL) { warnx("%s remains unchanged as disabled", name); } else { sblock.fs_flags &= ~FS_MULTILABEL; warnx("%s cleared", name); } } } if (mflag) { name = "minimum percentage of free space"; if (sblock.fs_minfree == mvalue) warnx("%s remains unchanged as %d%%", name, mvalue); else { warnx("%s changes from %d%% to %d%%", name, sblock.fs_minfree, mvalue); sblock.fs_minfree = mvalue; if (mvalue >= MINFREE && sblock.fs_optim == FS_OPTSPACE) warnx(OPTWARN, "time", ">=", MINFREE); if (mvalue < MINFREE && sblock.fs_optim == FS_OPTTIME) warnx(OPTWARN, "space", "<", MINFREE); } } if (Nflag) { name = "NFSv4 ACLs"; if (strcmp(Nvalue, "enable") == 0) { if (sblock.fs_flags & FS_NFS4ACLS) { warnx("%s remains unchanged as enabled", name); } else if (sblock.fs_flags & FS_ACLS) { warnx("%s and POSIX.1e ACLs are mutually " "exclusive", name); } else { sblock.fs_flags |= FS_NFS4ACLS; warnx("%s set", name); } } else if (strcmp(Nvalue, "disable") == 0) { if ((~sblock.fs_flags & FS_NFS4ACLS) == FS_NFS4ACLS) { warnx("%s remains unchanged as disabled", name); } else { sblock.fs_flags &= ~FS_NFS4ACLS; warnx("%s cleared", name); } } } if (nflag) { name = "soft updates"; if (strcmp(nvalue, "enable") == 0) { if (sblock.fs_flags & FS_DOSOFTDEP) warnx("%s remains unchanged as enabled", name); else if (sblock.fs_clean == 0) { warnx("%s cannot be enabled until fsck is run", name); } else { sblock.fs_flags |= FS_DOSOFTDEP; warnx("%s set", name); } } else if (strcmp(nvalue, "disable") == 0) { if ((~sblock.fs_flags & FS_DOSOFTDEP) == FS_DOSOFTDEP) warnx("%s remains unchanged as disabled", name); else { sblock.fs_flags &= ~FS_DOSOFTDEP; warnx("%s cleared", name); } } } if (oflag) { name = "optimization preference"; chg[FS_OPTSPACE] = "space"; chg[FS_OPTTIME] = "time"; if (sblock.fs_optim == ovalue) warnx("%s remains unchanged as %s", name, chg[ovalue]); else { 
warnx("%s changes from %s to %s", name, chg[sblock.fs_optim], chg[ovalue]); sblock.fs_optim = ovalue; if (sblock.fs_minfree >= MINFREE && ovalue == FS_OPTSPACE) warnx(OPTWARN, "time", ">=", MINFREE); if (sblock.fs_minfree < MINFREE && ovalue == FS_OPTTIME) warnx(OPTWARN, "space", "<", MINFREE); } } if (sflag) { name = "expected number of files per directory"; if (sblock.fs_avgfpdir == (unsigned)svalue) { warnx("%s remains unchanged as %d", name, svalue); } else { warnx("%s changes from %d to %d", name, sblock.fs_avgfpdir, svalue); sblock.fs_avgfpdir = svalue; } } if (tflag) { name = "issue TRIM to the disk"; if (strcmp(tvalue, "enable") == 0) { if (sblock.fs_flags & FS_TRIM) warnx("%s remains unchanged as enabled", name); else { sblock.fs_flags |= FS_TRIM; warnx("%s set", name); } } else if (strcmp(tvalue, "disable") == 0) { if ((~sblock.fs_flags & FS_TRIM) == FS_TRIM) warnx("%s remains unchanged as disabled", name); else { sblock.fs_flags &= ~FS_TRIM; warnx("%s cleared", name); } } } if (sbwrite(&disk, Aflag) == -1) goto err; ufs_disk_close(&disk); if (active) { bzero(&args, sizeof(args)); if (mount("ufs", on, stfs.f_flags | MNT_UPDATE | MNT_RELOAD, &args) < 0) err(9, "%s: reload", special); warnx("file system reloaded"); } exit(0); err: if (disk.d_error != NULL) errx(11, "%s: %s", special, disk.d_error); else err(12, "%s", special); }
void pass5(void) { int c, blk, frags, basesize, sumsize, mapsize, savednrpos = 0; int inomapsize, blkmapsize; struct fs *fs = &sblock; struct cg *cg = &cgrp; ufs_daddr_t dbase, dmax; ufs_daddr_t d; long i, j, k; struct csum *cs; struct csum cstotal; struct inodesc idesc[3]; char buf[MAXBSIZE]; struct cg *newcg = (struct cg *)buf; struct ocg *ocg = (struct ocg *)buf; inoinfo(WINO)->ino_state = USTATE; memset(newcg, 0, (size_t)fs->fs_cgsize); newcg->cg_niblk = fs->fs_ipg; if (cvtlevel >= 3) { if (fs->fs_maxcontig < 2 && fs->fs_contigsumsize > 0) { if (preen) pwarn("DELETING CLUSTERING MAPS\n"); if (preen || reply("DELETE CLUSTERING MAPS")) { fs->fs_contigsumsize = 0; doinglevel1 = 1; sbdirty(); } } if (fs->fs_maxcontig > 1) { char *doit = NULL; if (fs->fs_contigsumsize < 1) { doit = "CREAT"; } else if (fs->fs_contigsumsize < fs->fs_maxcontig && fs->fs_contigsumsize < FS_MAXCONTIG) { doit = "EXPAND"; } if (doit) { i = fs->fs_contigsumsize; fs->fs_contigsumsize = MIN(fs->fs_maxcontig, FS_MAXCONTIG); if (CGSIZE(fs) > fs->fs_bsize) { pwarn("CANNOT %s CLUSTER MAPS\n", doit); fs->fs_contigsumsize = i; } else if (preen || reply("CREATE CLUSTER MAPS")) { if (preen) pwarn("%sING CLUSTER MAPS\n", doit); fs->fs_cgsize = fragroundup(fs, CGSIZE(fs)); doinglevel1 = 1; sbdirty(); } } } } switch ((int)fs->fs_postblformat) { case FS_42POSTBLFMT: basesize = (char *)(&ocg->cg_btot[0]) - (char *)(&ocg->cg_firstfield); sumsize = &ocg->cg_iused[0] - (u_int8_t *)(&ocg->cg_btot[0]); mapsize = &ocg->cg_free[howmany(fs->fs_fpg, NBBY)] - (u_char *)&ocg->cg_iused[0]; blkmapsize = howmany(fs->fs_fpg, NBBY); inomapsize = &ocg->cg_free[0] - (u_char *)&ocg->cg_iused[0]; ocg->cg_magic = CG_MAGIC; savednrpos = fs->fs_nrpos; fs->fs_nrpos = 8; break; case FS_DYNAMICPOSTBLFMT: newcg->cg_btotoff = &newcg->cg_space[0] - (u_char *)(&newcg->cg_firstfield); newcg->cg_boff = newcg->cg_btotoff + fs->fs_cpg * sizeof(int32_t); newcg->cg_iusedoff = newcg->cg_boff + fs->fs_cpg * fs->fs_nrpos * sizeof(u_int16_t); newcg->cg_freeoff = newcg->cg_iusedoff + howmany(fs->fs_ipg, NBBY); inomapsize = newcg->cg_freeoff - newcg->cg_iusedoff; newcg->cg_nextfreeoff = newcg->cg_freeoff + howmany(fs->fs_cpg * fs->fs_spc / NSPF(fs), NBBY); blkmapsize = newcg->cg_nextfreeoff - newcg->cg_freeoff; if (fs->fs_contigsumsize > 0) { newcg->cg_clustersumoff = newcg->cg_nextfreeoff - sizeof(u_int32_t); newcg->cg_clustersumoff = roundup(newcg->cg_clustersumoff, sizeof(u_int32_t)); newcg->cg_clusteroff = newcg->cg_clustersumoff + (fs->fs_contigsumsize + 1) * sizeof(u_int32_t); newcg->cg_nextfreeoff = newcg->cg_clusteroff + howmany(fs->fs_cpg * fs->fs_spc / NSPB(fs), NBBY); } newcg->cg_magic = CG_MAGIC; basesize = &newcg->cg_space[0] - (u_char *)(&newcg->cg_firstfield); sumsize = newcg->cg_iusedoff - newcg->cg_btotoff; mapsize = newcg->cg_nextfreeoff - newcg->cg_iusedoff; break; default: inomapsize = blkmapsize = sumsize = 0; /* keep lint happy */ errx(EEXIT, "UNKNOWN ROTATIONAL TABLE FORMAT %d", fs->fs_postblformat); } memset(&idesc[0], 0, sizeof idesc); for (i = 0; i < 3; i++) { idesc[i].id_type = ADDR; if (doinglevel2) idesc[i].id_fix = FIX; } memset(&cstotal, 0, sizeof(struct csum)); j = blknum(fs, fs->fs_size + fs->fs_frag - 1); for (i = fs->fs_size; i < j; i++) setbmap(i); for (c = 0; c < fs->fs_ncg; c++) { if (got_siginfo) { printf("%s: phase 5: cyl group %d of %d (%d%%)\n", cdevname, c, sblock.fs_ncg, c * 100 / sblock.fs_ncg); got_siginfo = 0; } getblk(&cgblk, cgtod(fs, c), fs->fs_cgsize); if (!cg_chkmagic(cg)) pfatal("CG %d: BAD MAGIC NUMBER\n", c); dbase = 
cgbase(fs, c); dmax = dbase + fs->fs_fpg; if (dmax > fs->fs_size) dmax = fs->fs_size; newcg->cg_time = cg->cg_time; newcg->cg_cgx = c; if (c == fs->fs_ncg - 1) newcg->cg_ncyl = fs->fs_ncyl % fs->fs_cpg; else newcg->cg_ncyl = fs->fs_cpg; newcg->cg_ndblk = dmax - dbase; if (fs->fs_contigsumsize > 0) newcg->cg_nclusterblks = newcg->cg_ndblk / fs->fs_frag; newcg->cg_cs.cs_ndir = 0; newcg->cg_cs.cs_nffree = 0; newcg->cg_cs.cs_nbfree = 0; newcg->cg_cs.cs_nifree = fs->fs_ipg; if ((cg->cg_rotor >= 0) && (cg->cg_rotor < newcg->cg_ndblk)) newcg->cg_rotor = cg->cg_rotor; else newcg->cg_rotor = 0; if ((cg->cg_frotor >= 0) && (cg->cg_frotor < newcg->cg_ndblk)) newcg->cg_frotor = cg->cg_frotor; else newcg->cg_frotor = 0; if ((cg->cg_irotor >= 0) && (cg->cg_irotor < newcg->cg_niblk)) newcg->cg_irotor = cg->cg_irotor; else newcg->cg_irotor = 0; memset(&newcg->cg_frsum[0], 0, sizeof newcg->cg_frsum); memset(&cg_blktot(newcg)[0], 0, (size_t)(sumsize + mapsize)); if (fs->fs_postblformat == FS_42POSTBLFMT) ocg->cg_magic = CG_MAGIC; j = fs->fs_ipg * c; for (i = 0; i < inostathead[c].il_numalloced; j++, i++) { switch (inoinfo(j)->ino_state) { case USTATE: break; case DSTATE: case DCLEAR: case DFOUND: newcg->cg_cs.cs_ndir++; /* fall through */ case FSTATE: case FCLEAR: newcg->cg_cs.cs_nifree--; setbit(cg_inosused(newcg), i); break; default: if (j < ROOTINO) break; errx(EEXIT, "BAD STATE %d FOR INODE I=%ld", inoinfo(j)->ino_state, j); } } if (c == 0) for (i = 0; i < ROOTINO; i++) { setbit(cg_inosused(newcg), i); newcg->cg_cs.cs_nifree--; } for (i = 0, d = dbase; d < dmax; d += fs->fs_frag, i += fs->fs_frag) { frags = 0; for (j = 0; j < fs->fs_frag; j++) { if (testbmap(d + j)) continue; setbit(cg_blksfree(newcg), i + j); frags++; } if (frags == fs->fs_frag) { newcg->cg_cs.cs_nbfree++; j = cbtocylno(fs, i); cg_blktot(newcg)[j]++; cg_blks(fs, newcg, j)[cbtorpos(fs, i)]++; if (fs->fs_contigsumsize > 0) setbit(cg_clustersfree(newcg), i / fs->fs_frag); } else if (frags > 0) { newcg->cg_cs.cs_nffree += frags; blk = blkmap(fs, cg_blksfree(newcg), i); ffs_fragacct(fs, blk, newcg->cg_frsum, 1); } } if (fs->fs_contigsumsize > 0) { int32_t *sump = cg_clustersum(newcg); u_char *mapp = cg_clustersfree(newcg); int map = *mapp++; int bit = 1; int run = 0; for (i = 0; i < newcg->cg_nclusterblks; i++) { if ((map & bit) != 0) { run++; } else if (run != 0) { if (run > fs->fs_contigsumsize) run = fs->fs_contigsumsize; sump[run]++; run = 0; } if ((i & (NBBY - 1)) != (NBBY - 1)) { bit <<= 1; } else { map = *mapp++; bit = 1; } } if (run != 0) { if (run > fs->fs_contigsumsize) run = fs->fs_contigsumsize; sump[run]++; } } cstotal.cs_nffree += newcg->cg_cs.cs_nffree; cstotal.cs_nbfree += newcg->cg_cs.cs_nbfree; cstotal.cs_nifree += newcg->cg_cs.cs_nifree; cstotal.cs_ndir += newcg->cg_cs.cs_ndir; cs = &fs->fs_cs(fs, c); if (memcmp(&newcg->cg_cs, cs, sizeof *cs) != 0 && dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) { memmove(cs, &newcg->cg_cs, sizeof *cs); sbdirty(); } if (doinglevel1) { memmove(cg, newcg, (size_t)fs->fs_cgsize); cgdirty(); continue; } if ((memcmp(newcg, cg, basesize) != 0 || memcmp(&cg_blktot(newcg)[0], &cg_blktot(cg)[0], sumsize) != 0) && dofix(&idesc[2], "SUMMARY INFORMATION BAD")) { memmove(cg, newcg, (size_t)basesize); memmove(&cg_blktot(cg)[0], &cg_blktot(newcg)[0], (size_t)sumsize); cgdirty(); } if (usedsoftdep) { for (i = 0; i < inomapsize; i++) { j = cg_inosused(newcg)[i]; if ((cg_inosused(cg)[i] & j) == j) continue; for (k = 0; k < NBBY; k++) { if ((j & (1 << k)) == 0) continue; if (cg_inosused(cg)[i] & 
(1 << k)) continue; pwarn("ALLOCATED INODE %d MARKED FREE\n", c * fs->fs_ipg + i * NBBY + k); } } for (i = 0; i < blkmapsize; i++) { j = cg_blksfree(cg)[i]; if ((cg_blksfree(newcg)[i] & j) == j) continue; for (k = 0; k < NBBY; k++) { if ((j & (1 << k)) == 0) continue; if (cg_blksfree(newcg)[i] & (1 << k)) continue; pwarn("ALLOCATED FRAG %d MARKED FREE\n", c * fs->fs_fpg + i * NBBY + k); } } } if (memcmp(cg_inosused(newcg), cg_inosused(cg), mapsize) != 0 && dofix(&idesc[1], "BLK(S) MISSING IN BIT MAPS")) { memmove(cg_inosused(cg), cg_inosused(newcg), (size_t)mapsize); cgdirty(); } } if (fs->fs_postblformat == FS_42POSTBLFMT) fs->fs_nrpos = savednrpos; if (memcmp(&cstotal, &fs->fs_cstotal, sizeof *cs) != 0 && dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) { memmove(&fs->fs_cstotal, &cstotal, sizeof *cs); fs->fs_ronly = 0; fs->fs_fmod = 0; sbdirty(); } }
void pass5(void) { int c, blk, frags, basesize, sumsize, mapsize, cssize; int inomapsize, blkmapsize; struct fs *fs = sblock; daddr_t dbase, dmax; daddr_t d; long i, j, k; struct csum *cs; struct csum_total cstotal; struct inodesc idesc[4]; char buf[MAXBSIZE]; struct cg *newcg = (struct cg *)buf; struct ocg *ocg = (struct ocg *)buf; struct cg *cg = cgrp, *ncg; struct inostat *info; u_int32_t ncgsize; inoinfo(WINO)->ino_state = USTATE; memset(newcg, 0, (size_t)fs->fs_cgsize); newcg->cg_niblk = fs->fs_ipg; if (cvtlevel >= 3) { if (fs->fs_maxcontig < 2 && fs->fs_contigsumsize > 0) { if (preen) pwarn("DELETING CLUSTERING MAPS\n"); if (preen || reply("DELETE CLUSTERING MAPS")) { fs->fs_contigsumsize = 0; doinglevel1 = 1; sbdirty(); } } if (fs->fs_maxcontig > 1) { const char *doit = NULL; if (fs->fs_contigsumsize < 1) { doit = "CREAT"; } else if (fs->fs_contigsumsize < fs->fs_maxcontig && fs->fs_contigsumsize < FS_MAXCONTIG) { doit = "EXPAND"; } if (doit) { i = fs->fs_contigsumsize; fs->fs_contigsumsize = MIN(fs->fs_maxcontig, FS_MAXCONTIG); if (CGSIZE(fs) > fs->fs_bsize) { pwarn("CANNOT %s CLUSTER MAPS\n", doit); fs->fs_contigsumsize = i; } else if (preen || reply("CREATE CLUSTER MAPS")) { if (preen) pwarn("%sING CLUSTER MAPS\n", doit); ncgsize = fragroundup(fs, CGSIZE(fs)); ncg = realloc(cgrp, ncgsize); if (ncg == NULL) errexit( "cannot reallocate cg space"); cg = cgrp = ncg; fs->fs_cgsize = ncgsize; doinglevel1 = 1; sbdirty(); } } } } basesize = &newcg->cg_space[0] - (u_char *)(&newcg->cg_firstfield); cssize = (u_char *)&cstotal.cs_spare[0] - (u_char *)&cstotal.cs_ndir; sumsize = 0; if (is_ufs2) { newcg->cg_iusedoff = basesize; } else { /* * We reserve the space for the old rotation summary * tables for the benefit of old kernels, but do not * maintain them in modern kernels. In time, they can * go away. */ newcg->cg_old_btotoff = basesize; newcg->cg_old_boff = newcg->cg_old_btotoff + fs->fs_old_cpg * sizeof(int32_t); newcg->cg_iusedoff = newcg->cg_old_boff + fs->fs_old_cpg * fs->fs_old_nrpos * sizeof(u_int16_t); memset(&newcg->cg_space[0], 0, newcg->cg_iusedoff - basesize); } inomapsize = howmany(fs->fs_ipg, CHAR_BIT); newcg->cg_freeoff = newcg->cg_iusedoff + inomapsize; blkmapsize = howmany(fs->fs_fpg, CHAR_BIT); newcg->cg_nextfreeoff = newcg->cg_freeoff + blkmapsize; if (fs->fs_contigsumsize > 0) { newcg->cg_clustersumoff = newcg->cg_nextfreeoff - sizeof(u_int32_t); if (isappleufs) { /* Apple PR2216969 gives rationale for this change. * I believe they were mistaken, but we need to * duplicate it for compatibility. 
-- [email protected] */ newcg->cg_clustersumoff += sizeof(u_int32_t); } newcg->cg_clustersumoff = roundup(newcg->cg_clustersumoff, sizeof(u_int32_t)); newcg->cg_clusteroff = newcg->cg_clustersumoff + (fs->fs_contigsumsize + 1) * sizeof(u_int32_t); newcg->cg_nextfreeoff = newcg->cg_clusteroff + howmany(fragstoblks(fs, fs->fs_fpg), CHAR_BIT); } newcg->cg_magic = CG_MAGIC; mapsize = newcg->cg_nextfreeoff - newcg->cg_iusedoff; if (!is_ufs2 && ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0)) { switch ((int)fs->fs_old_postblformat) { case FS_42POSTBLFMT: basesize = (char *)(&ocg->cg_btot[0]) - (char *)(&ocg->cg_firstfield); sumsize = &ocg->cg_iused[0] - (u_int8_t *)(&ocg->cg_btot[0]); mapsize = &ocg->cg_free[howmany(fs->fs_fpg, NBBY)] - (u_char *)&ocg->cg_iused[0]; blkmapsize = howmany(fs->fs_fpg, NBBY); inomapsize = &ocg->cg_free[0] - (u_char *)&ocg->cg_iused[0]; ocg->cg_magic = CG_MAGIC; newcg->cg_magic = 0; break; case FS_DYNAMICPOSTBLFMT: sumsize = newcg->cg_iusedoff - newcg->cg_old_btotoff; break; default: errexit("UNKNOWN ROTATIONAL TABLE FORMAT %d", fs->fs_old_postblformat); } } memset(&idesc[0], 0, sizeof idesc); for (i = 0; i < 4; i++) { idesc[i].id_type = ADDR; if (!is_ufs2 && doinglevel2) idesc[i].id_fix = FIX; } memset(&cstotal, 0, sizeof(struct csum_total)); dmax = blknum(fs, fs->fs_size + fs->fs_frag - 1); for (d = fs->fs_size; d < dmax; d++) setbmap(d); for (c = 0; c < fs->fs_ncg; c++) { if (got_siginfo) { fprintf(stderr, "%s: phase 5: cyl group %d of %d (%d%%)\n", cdevname(), c, fs->fs_ncg, c * 100 / fs->fs_ncg); got_siginfo = 0; } #ifdef PROGRESS progress_bar(cdevname(), preen ? NULL : "phase 5", c, fs->fs_ncg); #endif /* PROGRESS */ getblk(&cgblk, cgtod(fs, c), fs->fs_cgsize); memcpy(cg, cgblk.b_un.b_cg, fs->fs_cgsize); if((doswap && !needswap) || (!doswap && needswap)) ffs_cg_swap(cgblk.b_un.b_cg, cg, sblock); if (!doinglevel1 && !cg_chkmagic(cg, 0)) pfatal("CG %d: PASS5: BAD MAGIC NUMBER\n", c); if(doswap) cgdirty(); /* * While we have the disk head where we want it, * write back the superblock to the spare at this * cylinder group. */ if ((cvtlevel && sblk.b_dirty) || doswap) { bwrite(fswritefd, sblk.b_un.b_buf, fsbtodb(sblock, cgsblock(sblock, c)), sblock->fs_sbsize); } else { /* * Read in the current alternate superblock, * and compare it to the master. If it's * wrong, fix it up. */ getblk(&asblk, cgsblock(sblock, c), sblock->fs_sbsize); if (asblk.b_errs) pfatal("CG %d: UNABLE TO READ ALTERNATE " "SUPERBLK\n", c); else { memmove(altsblock, asblk.b_un.b_fs, sblock->fs_sbsize); if (needswap) ffs_sb_swap(asblk.b_un.b_fs, altsblock); } sb_oldfscompat_write(sblock, sblocksave); if ((asblk.b_errs || cmpsblks(sblock, altsblock)) && dofix(&idesc[3], "ALTERNATE SUPERBLK(S) ARE INCORRECT")) { bwrite(fswritefd, sblk.b_un.b_buf, fsbtodb(sblock, cgsblock(sblock, c)), sblock->fs_sbsize); } sb_oldfscompat_read(sblock, 0); } dbase = cgbase(fs, c); dmax = dbase + fs->fs_fpg; if (dmax > fs->fs_size) dmax = fs->fs_size; if (is_ufs2 || (fs->fs_old_flags & FS_FLAGS_UPDATED)) newcg->cg_time = cg->cg_time; newcg->cg_old_time = cg->cg_old_time; newcg->cg_cgx = c; newcg->cg_ndblk = dmax - dbase; if (!is_ufs2) { if (c == fs->fs_ncg - 1) { /* Avoid fighting old fsck for this value. Its never used * outside of this check anyway. 
*/ if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) newcg->cg_old_ncyl = fs->fs_old_ncyl % fs->fs_old_cpg; else newcg->cg_old_ncyl = howmany(newcg->cg_ndblk, fs->fs_fpg / fs->fs_old_cpg); } else newcg->cg_old_ncyl = fs->fs_old_cpg; newcg->cg_old_niblk = fs->fs_ipg; newcg->cg_niblk = 0; } if (fs->fs_contigsumsize > 0) newcg->cg_nclusterblks = newcg->cg_ndblk / fs->fs_frag; newcg->cg_cs.cs_ndir = 0; newcg->cg_cs.cs_nffree = 0; newcg->cg_cs.cs_nbfree = 0; newcg->cg_cs.cs_nifree = fs->fs_ipg; if (cg->cg_rotor >= 0 && cg->cg_rotor < newcg->cg_ndblk) newcg->cg_rotor = cg->cg_rotor; else newcg->cg_rotor = 0; if (cg->cg_frotor >= 0 && cg->cg_frotor < newcg->cg_ndblk) newcg->cg_frotor = cg->cg_frotor; else newcg->cg_frotor = 0; if (cg->cg_irotor >= 0 && cg->cg_irotor < fs->fs_ipg) newcg->cg_irotor = cg->cg_irotor; else newcg->cg_irotor = 0; if (!is_ufs2) { newcg->cg_initediblk = 0; } else { if ((unsigned)cg->cg_initediblk > fs->fs_ipg) newcg->cg_initediblk = fs->fs_ipg; else newcg->cg_initediblk = cg->cg_initediblk; } memset(&newcg->cg_frsum[0], 0, sizeof newcg->cg_frsum); memset(&old_cg_blktot(newcg, 0)[0], 0, (size_t)(sumsize)); memset(cg_inosused(newcg, 0), 0, (size_t)(mapsize)); if (!is_ufs2 && ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) && fs->fs_old_postblformat == FS_42POSTBLFMT) ocg->cg_magic = CG_MAGIC; j = fs->fs_ipg * c; for (i = 0; i < fs->fs_ipg; j++, i++) { info = inoinfo(j); switch (info->ino_state) { case USTATE: break; case DSTATE: case DCLEAR: case DFOUND: newcg->cg_cs.cs_ndir++; /* fall through */ case FSTATE: case FCLEAR: newcg->cg_cs.cs_nifree--; setbit(cg_inosused(newcg, 0), i); break; default: if (j < ROOTINO) break; errexit("BAD STATE %d FOR INODE I=%ld", info->ino_state, (long)j); } } if (c == 0) for (i = 0; i < ROOTINO; i++) { setbit(cg_inosused(newcg, 0), i); newcg->cg_cs.cs_nifree--; } for (i = 0, d = dbase; d < dmax; d += fs->fs_frag, i += fs->fs_frag) { frags = 0; for (j = 0; j < fs->fs_frag; j++) { if (testbmap(d + j)) continue; setbit(cg_blksfree(newcg, 0), i + j); frags++; } if (frags == fs->fs_frag) { newcg->cg_cs.cs_nbfree++; if (sumsize) { j = old_cbtocylno(fs, i); old_cg_blktot(newcg, 0)[j]++; old_cg_blks(fs, newcg, j, 0)[old_cbtorpos(fs, i)]++; } if (fs->fs_contigsumsize > 0) setbit(cg_clustersfree(newcg, 0), fragstoblks(fs, i)); } else if (frags > 0) { newcg->cg_cs.cs_nffree += frags; blk = blkmap(fs, cg_blksfree(newcg, 0), i); ffs_fragacct(fs, blk, newcg->cg_frsum, 1, 0); } } if (fs->fs_contigsumsize > 0) { int32_t *sump = cg_clustersum(newcg, 0); u_char *mapp = cg_clustersfree(newcg, 0); int map = *mapp++; int bit = 1; int run = 0; for (i = 0; i < newcg->cg_nclusterblks; i++) { if ((map & bit) != 0) { run++; } else if (run != 0) { if (run > fs->fs_contigsumsize) run = fs->fs_contigsumsize; sump[run]++; run = 0; } if ((i & (NBBY - 1)) != (NBBY - 1)) { bit <<= 1; } else { map = *mapp++; bit = 1; } } if (run != 0) { if (run > fs->fs_contigsumsize) run = fs->fs_contigsumsize; sump[run]++; } } cstotal.cs_nffree += newcg->cg_cs.cs_nffree; cstotal.cs_nbfree += newcg->cg_cs.cs_nbfree; cstotal.cs_nifree += newcg->cg_cs.cs_nifree; cstotal.cs_ndir += newcg->cg_cs.cs_ndir; cs = &fs->fs_cs(fs, c); if (memcmp(&newcg->cg_cs, cs, sizeof *cs) != 0) { if (debug) { printf("cg %d: nffree: %d/%d nbfree %d/%d" " nifree %d/%d ndir %d/%d\n", c, cs->cs_nffree,newcg->cg_cs.cs_nffree, cs->cs_nbfree,newcg->cg_cs.cs_nbfree, cs->cs_nifree,newcg->cg_cs.cs_nifree, cs->cs_ndir,newcg->cg_cs.cs_ndir); } if (dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) { memmove(cs, &newcg->cg_cs, 
sizeof *cs); sbdirty(); } else markclean = 0; } if (doinglevel1) { memmove(cg, newcg, (size_t)fs->fs_cgsize); cgdirty(); continue; } if ((memcmp(newcg, cg, basesize) != 0) || (memcmp(&old_cg_blktot(newcg, 0)[0], &old_cg_blktot(cg, 0)[0], sumsize) != 0)) { if (dofix(&idesc[2], "SUMMARY INFORMATION BAD")) { memmove(cg, newcg, (size_t)basesize); memmove(&old_cg_blktot(cg, 0)[0], &old_cg_blktot(newcg, 0)[0], (size_t)sumsize); cgdirty(); } else markclean = 0; } if (usedsoftdep) { for (i = 0; i < inomapsize; i++) { j = cg_inosused(newcg, 0)[i]; if ((cg_inosused(cg, 0)[i] & j) == j) continue; for (k = 0; k < NBBY; k++) { if ((j & (1 << k)) == 0) continue; if (cg_inosused(cg, 0)[i] & (1 << k)) continue; pwarn("ALLOCATED INODE %ld " "MARKED FREE\n", c * fs->fs_ipg + i * 8 + k); } } for (i = 0; i < blkmapsize; i++) { j = cg_blksfree(cg, 0)[i]; if ((cg_blksfree(newcg, 0)[i] & j) == j) continue; for (k = 0; k < NBBY; k++) { if ((j & (1 << k)) == 0) continue; if (cg_inosused(cg, 0)[i] & (1 << k)) continue; pwarn("ALLOCATED FRAG %ld " "MARKED FREE\n", c * fs->fs_fpg + i * 8 + k); } } } if (memcmp(cg_inosused(newcg, 0), cg_inosused(cg, 0), mapsize) != 0 && dofix(&idesc[1], "BLK(S) MISSING IN BIT MAPS")) { memmove(cg_inosused(cg, 0), cg_inosused(newcg, 0), (size_t)mapsize); cgdirty(); } } if (memcmp(&cstotal, &fs->fs_cstotal, cssize) != 0) { if (debug) { printf("total: nffree: %lld/%lld nbfree %lld/%lld" " nifree %lld/%lld ndir %lld/%lld\n", (long long int)fs->fs_cstotal.cs_nffree, (long long int)cstotal.cs_nffree, (long long int)fs->fs_cstotal.cs_nbfree, (long long int)cstotal.cs_nbfree, (long long int)fs->fs_cstotal.cs_nifree, (long long int)cstotal.cs_nifree, (long long int)fs->fs_cstotal.cs_ndir, (long long int)cstotal.cs_ndir); } if (dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) { memmove(&fs->fs_cstotal, &cstotal, sizeof cstotal); fs->fs_ronly = 0; fs->fs_fmod = 0; sbdirty(); } else markclean = 0; } #ifdef PROGRESS if (!preen) progress_done(); #endif /* PROGRESS */ }
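/*
 * Both pass5 variants rebuild the inode and block usage maps with the
 * classic BSD bit macros (setbit/isset and friends); <sys/param.h> defines
 * them roughly along the lines below.  This sketch uses simplified local
 * copies and an assumed IPG_SKETCH to show how the inode map and the
 * cs_nifree counter move together; it is an illustration, not the fsck code.
 */
#include <stdio.h>
#include <string.h>

#define SKNBBY 8
#define sk_setbit(a, i) (((unsigned char *)(a))[(i) / SKNBBY] |= 1 << ((i) % SKNBBY))
#define sk_isset(a, i)  (((unsigned char *)(a))[(i) / SKNBBY] & (1 << ((i) % SKNBBY)))

#define IPG_SKETCH 64           /* assumed inodes per cylinder group */

int
main(void)
{
        unsigned char inosused[IPG_SKETCH / SKNBBY];
        int i, nifree = IPG_SKETCH;

        memset(inosused, 0, sizeof(inosused));

        /* Mark the first three inodes allocated, decrementing the free count. */
        for (i = 0; i < 3; i++) {
                sk_setbit(inosused, i);
                nifree--;
        }

        printf("inode 1 %s, inode 5 %s, %d inodes free\n",
            sk_isset(inosused, 1) ? "used" : "free",
            sk_isset(inosused, 5) ? "used" : "free", nifree);   /* used, free, 61 */
        return (0);
}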
void pass5(void) { int c, i, j, blk, frags, basesize, mapsize; int inomapsize, blkmapsize; struct fs *fs = &sblock; ufs2_daddr_t d, dbase, dmax, start; int rewritecg = 0; struct csum *cs; struct csum_total cstotal; struct inodesc idesc[3]; char buf[MAXBSIZE]; struct cg *cg, *newcg = (struct cg *)buf; struct bufarea *cgbp; inoinfo(WINO)->ino_state = USTATE; memset(newcg, 0, (size_t)fs->fs_cgsize); newcg->cg_niblk = fs->fs_ipg; if (cvtlevel >= 3) { if (fs->fs_maxcontig < 2 && fs->fs_contigsumsize > 0) { if (preen) pwarn("DELETING CLUSTERING MAPS\n"); if (preen || reply("DELETE CLUSTERING MAPS")) { fs->fs_contigsumsize = 0; rewritecg = 1; sbdirty(); } } if (fs->fs_maxcontig > 1) { const char *doit = 0; if (fs->fs_contigsumsize < 1) { doit = "CREAT"; } else if (fs->fs_contigsumsize < fs->fs_maxcontig && fs->fs_contigsumsize < FS_MAXCONTIG) { doit = "EXPAND"; } if (doit) { i = fs->fs_contigsumsize; fs->fs_contigsumsize = MIN(fs->fs_maxcontig, FS_MAXCONTIG); if (CGSIZE(fs) > (u_int)fs->fs_bsize) { pwarn("CANNOT %s CLUSTER MAPS\n", doit); fs->fs_contigsumsize = i; } else if (preen || reply("CREATE CLUSTER MAPS")) { if (preen) pwarn("%sING CLUSTER MAPS\n", doit); fs->fs_cgsize = fragroundup(fs, CGSIZE(fs)); rewritecg = 1; sbdirty(); } } } } basesize = &newcg->cg_space[0] - (u_char *)(&newcg->cg_firstfield); if (sblock.fs_magic == FS_UFS2_MAGIC) { newcg->cg_iusedoff = basesize; } else { /* * We reserve the space for the old rotation summary * tables for the benefit of old kernels, but do not * maintain them in modern kernels. In time, they can * go away. */ newcg->cg_old_btotoff = basesize; newcg->cg_old_boff = newcg->cg_old_btotoff + fs->fs_old_cpg * sizeof(int32_t); newcg->cg_iusedoff = newcg->cg_old_boff + fs->fs_old_cpg * fs->fs_old_nrpos * sizeof(u_int16_t); memset(&newcg->cg_space[0], 0, newcg->cg_iusedoff - basesize); } inomapsize = howmany(fs->fs_ipg, CHAR_BIT); newcg->cg_freeoff = newcg->cg_iusedoff + inomapsize; blkmapsize = howmany(fs->fs_fpg, CHAR_BIT); newcg->cg_nextfreeoff = newcg->cg_freeoff + blkmapsize; if (fs->fs_contigsumsize > 0) { newcg->cg_clustersumoff = newcg->cg_nextfreeoff - sizeof(u_int32_t); newcg->cg_clustersumoff = roundup(newcg->cg_clustersumoff, sizeof(u_int32_t)); newcg->cg_clusteroff = newcg->cg_clustersumoff + (fs->fs_contigsumsize + 1) * sizeof(u_int32_t); newcg->cg_nextfreeoff = newcg->cg_clusteroff + howmany(fragstoblks(fs, fs->fs_fpg), CHAR_BIT); } newcg->cg_magic = CG_MAGIC; mapsize = newcg->cg_nextfreeoff - newcg->cg_iusedoff; memset(&idesc[0], 0, sizeof idesc); for (i = 0; i < 3; i++) idesc[i].id_type = ADDR; memset(&cstotal, 0, sizeof(struct csum_total)); dmax = blknum(fs, fs->fs_size + fs->fs_frag - 1); for (d = fs->fs_size; d < dmax; d++) setbmap(d); for (c = 0; c < fs->fs_ncg; c++) { if (got_siginfo) { printf("%s: phase 5: cyl group %d of %d (%d%%)\n", cdevname, c, sblock.fs_ncg, c * 100 / sblock.fs_ncg); got_siginfo = 0; } if (got_sigalarm) { setproctitle("%s p5 %d%%", cdevname, c * 100 / sblock.fs_ncg); got_sigalarm = 0; } cgbp = cgget(c); cg = cgbp->b_un.b_cg; if (!cg_chkmagic(cg)) pfatal("CG %d: BAD MAGIC NUMBER\n", c); newcg->cg_time = cg->cg_time; newcg->cg_old_time = cg->cg_old_time; newcg->cg_unrefs = cg->cg_unrefs; newcg->cg_cgx = c; dbase = cgbase(fs, c); dmax = dbase + fs->fs_fpg; if (dmax > fs->fs_size) dmax = fs->fs_size; newcg->cg_ndblk = dmax - dbase; if (fs->fs_magic == FS_UFS1_MAGIC) { if (c == fs->fs_ncg - 1) newcg->cg_old_ncyl = howmany(newcg->cg_ndblk, fs->fs_fpg / fs->fs_old_cpg); else newcg->cg_old_ncyl = fs->fs_old_cpg; 
newcg->cg_old_niblk = fs->fs_ipg; newcg->cg_niblk = 0; } if (fs->fs_contigsumsize > 0) newcg->cg_nclusterblks = newcg->cg_ndblk / fs->fs_frag; newcg->cg_cs.cs_ndir = 0; newcg->cg_cs.cs_nffree = 0; newcg->cg_cs.cs_nbfree = 0; newcg->cg_cs.cs_nifree = fs->fs_ipg; if (cg->cg_rotor >= 0 && cg->cg_rotor < newcg->cg_ndblk) newcg->cg_rotor = cg->cg_rotor; else newcg->cg_rotor = 0; if (cg->cg_frotor >= 0 && cg->cg_frotor < newcg->cg_ndblk) newcg->cg_frotor = cg->cg_frotor; else newcg->cg_frotor = 0; if (cg->cg_irotor >= 0 && cg->cg_irotor < fs->fs_ipg) newcg->cg_irotor = cg->cg_irotor; else newcg->cg_irotor = 0; if (fs->fs_magic == FS_UFS1_MAGIC) { newcg->cg_initediblk = 0; } else { if ((unsigned)cg->cg_initediblk > fs->fs_ipg) newcg->cg_initediblk = fs->fs_ipg; else newcg->cg_initediblk = cg->cg_initediblk; } memset(&newcg->cg_frsum[0], 0, sizeof newcg->cg_frsum); memset(cg_inosused(newcg), 0, (size_t)(mapsize)); j = fs->fs_ipg * c; for (i = 0; i < inostathead[c].il_numalloced; j++, i++) { switch (inoinfo(j)->ino_state) { case USTATE: break; case DSTATE: case DCLEAR: case DFOUND: case DZLINK: newcg->cg_cs.cs_ndir++; /* FALLTHROUGH */ case FSTATE: case FCLEAR: case FZLINK: newcg->cg_cs.cs_nifree--; setbit(cg_inosused(newcg), i); break; default: if (j < (int)ROOTINO) break; errx(EEXIT, "BAD STATE %d FOR INODE I=%d", inoinfo(j)->ino_state, j); } } if (c == 0) for (i = 0; i < (int)ROOTINO; i++) { setbit(cg_inosused(newcg), i); newcg->cg_cs.cs_nifree--; } start = -1; for (i = 0, d = dbase; d < dmax; d += fs->fs_frag, i += fs->fs_frag) { frags = 0; for (j = 0; j < fs->fs_frag; j++) { if (testbmap(d + j)) { if (Eflag && start != -1) { clear_blocks(start, d + j - 1); start = -1; } continue; } if (start == -1) start = d + j; setbit(cg_blksfree(newcg), i + j); frags++; } if (frags == fs->fs_frag) { newcg->cg_cs.cs_nbfree++; if (fs->fs_contigsumsize > 0) setbit(cg_clustersfree(newcg), i / fs->fs_frag); } else if (frags > 0) { newcg->cg_cs.cs_nffree += frags; blk = blkmap(fs, cg_blksfree(newcg), i); ffs_fragacct(fs, blk, newcg->cg_frsum, 1); } } if (Eflag && start != -1) clear_blocks(start, d - 1); if (fs->fs_contigsumsize > 0) { int32_t *sump = cg_clustersum(newcg); u_char *mapp = cg_clustersfree(newcg); int map = *mapp++; int bit = 1; int run = 0; for (i = 0; i < newcg->cg_nclusterblks; i++) { if ((map & bit) != 0) { run++; } else if (run != 0) { if (run > fs->fs_contigsumsize) run = fs->fs_contigsumsize; sump[run]++; run = 0; } if ((i & (CHAR_BIT - 1)) != (CHAR_BIT - 1)) { bit <<= 1; } else { map = *mapp++; bit = 1; } } if (run != 0) { if (run > fs->fs_contigsumsize) run = fs->fs_contigsumsize; sump[run]++; } } if (bkgrdflag != 0) { cstotal.cs_nffree += cg->cg_cs.cs_nffree; cstotal.cs_nbfree += cg->cg_cs.cs_nbfree; cstotal.cs_nifree += cg->cg_cs.cs_nifree; cstotal.cs_ndir += cg->cg_cs.cs_ndir; } else { cstotal.cs_nffree += newcg->cg_cs.cs_nffree; cstotal.cs_nbfree += newcg->cg_cs.cs_nbfree; cstotal.cs_nifree += newcg->cg_cs.cs_nifree; cstotal.cs_ndir += newcg->cg_cs.cs_ndir; } cs = &fs->fs_cs(fs, c); if (cursnapshot == 0 && memcmp(&newcg->cg_cs, cs, sizeof *cs) != 0 && dofix(&idesc[0], "FREE BLK COUNT(S) WRONG IN SUPERBLK")) { memmove(cs, &newcg->cg_cs, sizeof *cs); sbdirty(); } if (rewritecg) { memmove(cg, newcg, (size_t)fs->fs_cgsize); dirty(cgbp); continue; } if (cursnapshot == 0 && memcmp(newcg, cg, basesize) != 0 && dofix(&idesc[2], "SUMMARY INFORMATION BAD")) { memmove(cg, newcg, (size_t)basesize); dirty(cgbp); } if (bkgrdflag != 0 || usedsoftdep || debug) update_maps(cg, newcg, bkgrdflag); if 
(cursnapshot == 0 && memcmp(cg_inosused(newcg), cg_inosused(cg), mapsize) != 0 && dofix(&idesc[1], "BLK(S) MISSING IN BIT MAPS")) { memmove(cg_inosused(cg), cg_inosused(newcg), (size_t)mapsize); dirty(cgbp); } } if (cursnapshot == 0 && memcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal) != 0 && dofix(&idesc[0], "SUMMARY BLK COUNT(S) WRONG IN SUPERBLK")) { memmove(&fs->fs_cstotal, &cstotal, sizeof cstotal); fs->fs_ronly = 0; fs->fs_fmod = 0; sbdirty(); } /* * When doing background fsck on a snapshot, figure out whether * the superblock summary is inaccurate and correct it when * necessary. */ if (cursnapshot != 0) { cmd.size = 1; cmd.value = cstotal.cs_ndir - fs->fs_cstotal.cs_ndir; if (cmd.value != 0) { if (debug) printf("adjndir by %+" PRIi64 "\n", cmd.value); if (bkgrdsumadj == 0 || sysctl(adjndir, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1) rwerror("ADJUST NUMBER OF DIRECTORIES", cmd.value); } cmd.value = cstotal.cs_nbfree - fs->fs_cstotal.cs_nbfree; if (cmd.value != 0) { if (debug) printf("adjnbfree by %+" PRIi64 "\n", cmd.value); if (bkgrdsumadj == 0 || sysctl(adjnbfree, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1) rwerror("ADJUST NUMBER OF FREE BLOCKS", cmd.value); } cmd.value = cstotal.cs_nifree - fs->fs_cstotal.cs_nifree; if (cmd.value != 0) { if (debug) printf("adjnifree by %+" PRIi64 "\n", cmd.value); if (bkgrdsumadj == 0 || sysctl(adjnifree, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1) rwerror("ADJUST NUMBER OF FREE INODES", cmd.value); } cmd.value = cstotal.cs_nffree - fs->fs_cstotal.cs_nffree; if (cmd.value != 0) { if (debug) printf("adjnffree by %+" PRIi64 "\n", cmd.value); if (bkgrdsumadj == 0 || sysctl(adjnffree, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1) rwerror("ADJUST NUMBER OF FREE FRAGS", cmd.value); } cmd.value = cstotal.cs_numclusters - fs->fs_cstotal.cs_numclusters; if (cmd.value != 0) { if (debug) printf("adjnumclusters by %+" PRIi64 "\n", cmd.value); if (bkgrdsumadj == 0 || sysctl(adjnumclusters, MIBSIZE, 0, 0, &cmd, sizeof cmd) == -1) rwerror("ADJUST NUMBER OF FREE CLUSTERS", cmd.value); } } }
/*
 * tmpfs_read()
 *	Read bytes out of the current file or directory
 *
 * Directories get their own routine.
 */
void
tmpfs_read(struct msg *m, struct file *f)
{
        uint nseg, cnt, lim, blk;
        struct openfile *o;
        char *blkp, *tmpbuf = NULL;
        seg_t *mp;

        /*
         * Directory--only one is the root
         */
        if ((o = f->f_file) == 0) {
                tmpfs_readdir(m, f);
                return;
        }

        /*
         * Access?
         */
        if (!(f->f_perm & ACC_READ)) {
                msg_err(m->m_sender, EPERM);
                return;
        }

        /*
         * EOF?
         */
        if (f->f_pos >= o->o_len) {
                m->m_arg = m->m_arg1 = m->m_buflen = m->m_nseg = 0;
                msg_reply(m->m_sender, m);
                return;
        }

        /*
         * Calculate # bytes to get
         */
        lim = m->m_arg;
        if (lim > (o->o_len - f->f_pos)) {
                lim = o->o_len - f->f_pos;
        }

        /*
         * Build message segments
         */
        cnt = 0;
        for (nseg = 0; (cnt < lim) && (nseg < MSGSEGS); ++nseg) {
                uint off, sz;

                /*
                 * Get next block of data.  We simulate sparse
                 * files by using our pre-allocated source of
                 * zeroes.
                 */
                blk = blknum(f->f_pos);
                blkp = hash_lookup(o->o_blocks, blk);
                if (blkp == 0) {
                        extern char *zeroes;

                        blkp = zeroes;
                }

                /*
                 * Calculate how much of the block to add to
                 * the message.
                 */
                blkoff(f->f_pos, &off, &sz);
                if ((cnt+sz) > lim) {
                        sz = lim - cnt;
                }

                /*
                 * Put into next message segment
                 */
                m->m_seg[nseg].s_buf = blkp+off;
                m->m_seg[nseg].s_buflen = sz;

                /*
                 * Advance counter
                 */
                cnt += sz;
                f->f_pos += sz;
        }

        /*
         * If we couldn't satisfy the read using our scatter/gather
         * allowance of buffers, use the last slot as a pointer to
         * a buffer which will hold all the necessary data.
         */
        if (cnt < lim) {
                uint tmpoff = 0;

                /*
                 * Get a buffer which can hold the last data of
                 * the message segments already assembled, plus
                 * all the data we haven't read in yet.
                 */
                mp = &m->m_seg[MSGSEGS-1];
                tmpoff = mp->s_buflen;
                tmpbuf = malloc(tmpoff + (lim - cnt));
                if (tmpbuf == NULL) {
                        goto retshort;
                }

                /*
                 * Copy in the current data
                 */
                bcopy(mp->s_buf, tmpbuf, tmpoff);

                /*
                 * Now bring in the rest of the data from the file
                 */
                while (cnt < lim) {
                        uint off, sz;

                        /*
                         * Get next block of data.  We simulate sparse
                         * files by using our pre-allocated source of
                         * zeroes.
                         */
                        blk = blknum(f->f_pos);
                        blkp = hash_lookup(o->o_blocks, blk);
                        if (blkp == 0) {
                                extern char *zeroes;

                                blkp = zeroes;
                        }

                        /*
                         * Calculate how much of the block to add to
                         * the message.
                         */
                        blkoff(f->f_pos, &off, &sz);
                        if ((cnt+sz) > lim) {
                                sz = lim - cnt;
                        }

                        /*
                         * Put into next message segment
                         */
                        bcopy(blkp+off, tmpbuf+tmpoff, sz);

                        /*
                         * Advance counter
                         */
                        cnt += sz;
                        f->f_pos += sz;
                        tmpoff += sz;
                }

                /*
                 * Point the last message segment into this buffer
                 */
                mp->s_buf = tmpbuf;
                mp->s_buflen = tmpoff;
        }

        /*
         * Send back reply
         */
retshort:
        m->m_arg = cnt;
        m->m_nseg = nseg;
        m->m_arg1 = 0;
        msg_reply(m->m_sender, m);

        /*
         * Free our extra buffer if it's been used
         */
        if (tmpbuf) {
                free(tmpbuf);
        }
}
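/*
 * A minimal sketch of the sparse-file fallback used above: a block that was
 * never written resolves to a shared, pre-allocated page of zeroes, so the
 * read path never has to allocate for a hole.  The block table, sizes, and
 * names below are assumptions, not the real tmpfs structures.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BLK_SKETCH 4096         /* assumed tmpfs block size */

static char *zeroes_sketch;     /* shared source of zeroes for holes */
static char *blocks_sketch[4];  /* NULL entry = block never written (a hole) */

/* A missing block reads as the shared zero page instead of forcing allocation. */
static const char *
read_block(unsigned int blk)
{
        return (blocks_sketch[blk] ? blocks_sketch[blk] : zeroes_sketch);
}

int
main(void)
{
        zeroes_sketch = calloc(1, BLK_SKETCH);
        blocks_sketch[1] = malloc(BLK_SKETCH);
        if (zeroes_sketch == NULL || blocks_sketch[1] == NULL)
                return (1);
        memset(blocks_sketch[1], 'x', BLK_SKETCH);

        printf("block 0 first byte: %d (hole)\n", read_block(0)[0]);    /* 0 */
        printf("block 1 first byte: %c (written)\n", read_block(1)[0]); /* x */
        return (0);
}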