int
cgbfree(struct uufsd *disk, ufs2_daddr_t bno, long size)
{
	u_int8_t *blksfree;
	struct fs *fs;
	struct cg *cgp;
	ufs1_daddr_t fragno, cgbno;
	int i, cg, blk, frags, bbase;

	fs = &disk->d_fs;
	cg = dtog(fs, bno);
	if (cgread1(disk, cg) != 1)
		return (-1);
	cgp = &disk->d_cg;
	cgbno = dtogd(fs, bno);
	blksfree = cg_blksfree(cgp);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		ffs_setblock(fs, blksfree, fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++)
			setbit(blksfree, cgbno + i);
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, blksfree, fragno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	return (cgwrite(disk));
}
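/*
 * Example (sketch, not from the original source): freeing one full
 * block through the libufs cgbfree() above.  Assumes a filesystem
 * opened with ufs_disk_fillout(); the helper name, device path, and
 * block number are hypothetical, and error handling is minimal.
 */
#include <libufs.h>

static int
free_one_block(const char *dev, ufs2_daddr_t bno)
{
	struct uufsd disk;
	int rc;

	if (ufs_disk_fillout(&disk, dev) == -1)
		return (-1);
	/* cgbfree() reads the right cylinder group, flips the bitmap
	   bits, fixes the free counts, and writes the group back. */
	rc = cgbfree(&disk, bno, disk.d_fs.fs_bsize);
	ufs_disk_close(&disk);
	return (rc == -1 ? -1 : 0);
}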
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
{
	struct cg *cgp;
	daddr_t blkno;
	int32_t bno;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);
	u_int8_t *blksfree;

	cgp = (struct cg *)bp->b_data;
	blksfree = cg_blksfree(cgp, needswap);
	if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
		bpref = ufs_rw32(cgp->cg_rotor, needswap);
	} else {
		bpref = ffs_blknum(fs, bpref);
		bno = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (ffs_isblock(fs, blksfree, ffs_fragstoblks(fs, bno)))
			goto gotit;
	}
	/*
	 * Take the next available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (0);
	cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
	blkno = ffs_fragstoblks(fs, bno);
	ffs_clrblock(fs, blksfree, (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
	fs->fs_fmod = 1;
	blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
	return (blkno);
}
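/*
 * Example (sketch, not from the original source): the preference check
 * in ffs_alloccgblk() relies on dtog()/dtogd() to split a fragment
 * address into a cylinder-group number and an in-group offset.  The
 * simplified macros below assume a hypothetical fs_fpg of 32768
 * fragments per group; the real macros take the value from the
 * superblock.
 */
#include <stdio.h>

#define EX_FPG		32768			/* fragments per group */
#define ex_dtog(d)	((d) / EX_FPG)		/* fragment -> cg number */
#define ex_dtogd(d)	((d) % EX_FPG)		/* fragment -> cg offset */

int
main(void)
{
	long bpref = 100000;	/* hypothetical preferred fragment */

	/* prints "fragment 100000 -> cg 3, offset 1696" */
	printf("fragment %ld -> cg %ld, offset %ld\n",
	    bpref, ex_dtog(bpref), ex_dtogd(bpref));
	return 0;
}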
ufs2_daddr_t
cgballoc(struct uufsd *disk)
{
	u_int8_t *blksfree;
	struct cg *cgp;
	struct fs *fs;
	long bno;

	fs = &disk->d_fs;
	cgp = &disk->d_cg;
	blksfree = cg_blksfree(cgp);
	for (bno = 0; bno < fs->fs_fpg / fs->fs_frag; bno++)
		if (ffs_isblock(fs, blksfree, bno))
			goto gotit;
	return (0);
gotit:
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	ffs_clrblock(fs, blksfree, (long)bno);
	ffs_clusteracct(fs, cgp, bno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_fmod = 1;
	return (cgbase(fs, cgp->cg_cgx) + blkstofrags(fs, bno));
}
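/*
 * Example (sketch, not from the original source): allocating one block
 * from a single cylinder group with the libufs cgballoc() above.
 * Unlike cgbfree(), cgballoc() operates on a group already loaded into
 * disk->d_cg, so the caller loads it with cgread1() and writes it back
 * afterwards; the helper name is hypothetical.
 */
#include <libufs.h>

static ufs2_daddr_t
alloc_from_cg(struct uufsd *disk, int cg)
{
	ufs2_daddr_t bno;

	if (cgread1(disk, cg) != 1)	/* load the group into d_cg */
		return (0);
	bno = cgballoc(disk);		/* 0 means no free block found */
	if (bno == 0)
		return (0);
	if (cgwrite(disk) == -1)	/* push the updated bitmap out */
		return (0);
	return (bno);
}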
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map.  If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (size > fs->fs_bsize || ffs_fragoff(fs, size) != 0 ||
	    ffs_fragnum(fs, bno) + ffs_numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
		    (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %llu", (long long)bno,
		    (unsigned long long)ip->i_number);
		return;
	}
	error = bread(ip->i_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, 0, &bp);
	if (error) {
		brelse(bp, 0);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp, needswap)) {
		brelse(bp, 0);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = ffs_fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree(cgp, needswap), fragno)) {
			errx(1, "blkfree: freeing free block %lld",
			    (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - ffs_fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = ffs_numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp, needswap), cgbno + i)) {
				errx(1,
				    "blkfree: freeing free frag: block %lld",
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = ffs_fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag,
			    needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}
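/*
 * Example (sketch, not from the original source): the reassembly test
 * at the end of ffs_blkfree() boils down to "are all fs_frag bits of
 * this block now set in the free map?".  The stand-alone bitmap below
 * assumes a hypothetical fs_frag of 8 and defines its own bit macros
 * (the kernel's setbit()/isset() live in <sys/param.h>).
 */
#include <stdio.h>
#include <string.h>

#define EX_FRAG		8
#define ex_setbit(a, i)	((a)[(i) / 8] |= 1 << ((i) % 8))
#define ex_isset(a, i)	((a)[(i) / 8] & (1 << ((i) % 8)))

int
main(void)
{
	unsigned char map[1];
	int i, nfree;

	memset(map, 0, sizeof(map));
	for (i = 0; i < 6; i++)		/* fragments 0-5 already free */
		ex_setbit(map, i);
	ex_setbit(map, 6);		/* free the final pair, as a */
	ex_setbit(map, 7);		/* two-fragment blkfree would */
	for (nfree = 0, i = 0; i < EX_FRAG; i++)
		if (ex_isset(map, i))
			nfree++;
	/* all EX_FRAG fragments free: the block is whole again, so the
	   caller moves the accounting from cs_nffree to cs_nbfree */
	printf("free frags %d, block reassembled: %s\n", nfree,
	    nfree == EX_FRAG ? "yes" : "no");
	return 0;
}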
/*
 * Find a suitable location for the journal in the filesystem.
 *
 * Our strategy here is to look for a contiguous block of free space
 * at least "logfile" MB in size (plus room for any indirect blocks).
 * We start at the middle of the filesystem and check each cylinder
 * group working outwards.  If "logfile" MB is not available as a
 * single contiguous chunk, then return the address and size of the
 * largest chunk found.
 *
 * XXX
 * At what stage does the search fail?  Is it reasonable to give up if
 * the largest space we can find is less than a quarter of the
 * requested space?  If the search fails entirely, a returned block
 * address of "0" indicates this.
 */
void
wapbl_find_log_start(struct mount *mp, struct vnode *vp, off_t logsize,
    daddr_t *addr, daddr_t *indir_addr, size_t *size)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs = ump->um_fs;
	struct vnode *devvp = ump->um_devvp;
	struct cg *cgp;
	struct buf *bp;
	uint8_t *blksfree;
	daddr_t blkno, best_addr, start_addr;
	daddr_t desired_blks, min_desired_blks;
	daddr_t freeblks, best_blks;
	int bpcg, cg, error, fixedsize, indir_blks, n, s;
#ifdef FFS_EI
	const int needswap = UFS_FSNEEDSWAP(fs);
#endif

	if (logsize == 0) {
		fixedsize = 0;	/* We can adjust the size if tight */
		logsize = lfragtosize(fs, fs->fs_dsize) /
		    UFS_WAPBL_JOURNAL_SCALE;
		DPRINTF("suggested log size = %lld\n", logsize);
		logsize = max(logsize, UFS_WAPBL_MIN_JOURNAL_SIZE);
		logsize = min(logsize, UFS_WAPBL_MAX_JOURNAL_SIZE);
		DPRINTF("adjusted log size = %lld\n", logsize);
	} else {
		fixedsize = 1;
		DPRINTF("fixed log size = %lld\n", logsize);
	}

	desired_blks = logsize / fs->fs_bsize;
	DPRINTF("desired blocks = %lld\n", desired_blks);

	/* add in number of indirect blocks needed */
	indir_blks = 0;
	if (desired_blks >= NDADDR) {
		struct indir indirs[NIADDR + 2];
		int num;

		error = ufs_getlbns(vp, desired_blks, indirs, &num);
		if (error) {
			printf("%s: ufs_getlbns failed, error %d!\n",
			    __func__, error);
			goto bad;
		}

		switch (num) {
		case 2:
			indir_blks = 1;		/* 1st level indirect */
			break;
		case 3:
			indir_blks = 1 +	/* 1st level indirect */
			    1 +			/* 2nd level indirect */
			    indirs[1].in_off + 1; /* extra 1st level indirect */
			break;
		default:
			printf("%s: unexpected numlevels %d from ufs_getlbns\n",
			    __func__, num);
			*size = 0;
			goto bad;
		}
		desired_blks += indir_blks;
	}
	DPRINTF("desired blocks = %lld (including indirect)\n",
	    desired_blks);

	/*
	 * If a specific size wasn't requested, allow for a smaller log
	 * if we're really tight for space...
	 */
	min_desired_blks = desired_blks;
	if (!fixedsize)
		min_desired_blks = desired_blks / 4;

	/* Look at number of blocks per CG.  If it's too small, bail early. */
	bpcg = fragstoblks(fs, fs->fs_fpg);
	if (min_desired_blks > bpcg) {
		printf("ffs_wapbl: cylinder group size of %lld MB "
		    "is not big enough for journal\n",
		    lblktosize(fs, bpcg) / (1024 * 1024));
		goto bad;
	}

	/*
	 * Start with the middle cylinder group, and search outwards in
	 * both directions until we either find the requested log size
	 * or reach the start/end of the file system.  If we reach the
	 * start/end without finding enough space for the full requested
	 * log size, use the largest extent found if it is large enough
	 * to satisfy our minimum size.
	 *
	 * XXX
	 * Can we just use the cluster contigsum stuff (esp. on UFS2)
	 * here to simplify this search code?
	 */
	best_addr = 0;
	best_blks = 0;
	for (cg = fs->fs_ncg / 2, s = 0, n = 1;
	    best_blks < desired_blks && cg >= 0 && cg < fs->fs_ncg;
	    s++, n = -n, cg += n * s) {
		DPRINTF("check cg %d of %d\n", cg, fs->fs_ncg);
		error = bread(devvp, fsbtodb(fs, cgtod(fs, cg)),
		    fs->fs_cgsize, &bp);
		if (error) {
			continue;
		}
		cgp = (struct cg *)bp->b_data;
		if (!cg_chkmagic(cgp)) {
			brelse(bp);
			continue;
		}

		blksfree = cg_blksfree(cgp);

		for (blkno = 0; blkno < bpcg;) {
			/* look for next free block */
			/* XXX use scanc() and fragtbl[] here? */
			for (; blkno < bpcg - min_desired_blks; blkno++)
				if (ffs_isblock(fs, blksfree, blkno))
					break;

			/* past end of search space in this CG? */
			if (blkno >= bpcg - min_desired_blks)
				break;

			/* count how many free blocks in this extent */
			start_addr = blkno;
			for (freeblks = 0; blkno < bpcg; blkno++, freeblks++)
				if (!ffs_isblock(fs, blksfree, blkno))
					break;

			if (freeblks > best_blks) {
				best_blks = freeblks;
				best_addr = blkstofrags(fs, start_addr) +
				    cgbase(fs, cg);

				if (freeblks >= desired_blks) {
					DPRINTF("found len %lld"
					    " at offset %lld in cg\n",
					    freeblks, start_addr);
					break;
				}
			}
		}
		brelse(bp);
	}

	DPRINTF("best found len = %lld, wanted %lld at addr %lld\n",
	    best_blks, desired_blks, best_addr);

	if (best_blks < min_desired_blks) {
		*addr = 0;
		*indir_addr = 0;
	} else {
		/* put indirect blocks at start, and data blocks after */
		*addr = best_addr + blkstofrags(fs, indir_blks);
		*indir_addr = best_addr;
	}
	*size = min(desired_blks, best_blks) - indir_blks;
	return;

bad:
	*addr = 0;
	*indir_addr = 0;
	*size = 0;
	return;
}
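/*
 * Example (sketch, not from the original source): the cylinder-group
 * walk in wapbl_find_log_start() starts at the middle group and steps
 * outwards via "s++, n = -n, cg += n * s", visiting mid, mid-1, mid+1,
 * mid-2, mid+2, ... until either bound is crossed.  A hypothetical
 * ncg of 8 is assumed below.
 */
#include <stdio.h>

int
main(void)
{
	int ncg = 8, cg, s, n;

	/* prints: 4 3 5 2 6 1 7 0 */
	for (cg = ncg / 2, s = 0, n = 1;
	    cg >= 0 && cg < ncg;
	    s++, n = -n, cg += n * s)
		printf("%d ", cg);
	printf("\n");
	return 0;
}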