/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
int
ffs_alloc(struct inode *ip, daddr_t lbn __unused, daddr_t bpref, int size,
    daddr_t *bnp)
{
	struct fs *fs = ip->i_fs;
	daddr_t bno;
	int cg;

	*bnp = 0;
	if (size > fs->fs_bsize || fragoff(fs, size) != 0) {
		errx(1, "ffs_alloc: bad size: bsize %d size %d",
		    fs->fs_bsize, size);
	}
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
	if (bno > 0) {
		if (ip->i_fs->fs_magic == FS_UFS1_MAGIC)
			ip->i_ffs1_blocks += size / DEV_BSIZE;
		else
			ip->i_ffs2_blocks += size / DEV_BSIZE;
		*bnp = bno;
		return (0);
	}
nospace:
	return (ENOSPC);
}
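The quadratic rehash named in the comment above lives in ffs_hashalloc(), which is not shown here. The following is a minimal standalone sketch of the probe order only, modeled on the historic BSD ffs_hashalloc(); try_cg(), hashalloc_order(), and ncg are made-up names for this illustration, and the stub allocator simply reports every cylinder group as full so the whole search order prints:

#include <stdio.h>

/* Hypothetical stand-in for ffs_alloccg(): nonzero means "allocated". */
static int
try_cg(int cg)
{
	(void)cg;
	return (0);	/* pretend every cylinder group is full */
}

/*
 * Probe order in the style of the historic ffs_hashalloc(): the
 * preferred cylinder group first, then groups at quadratically
 * growing offsets, then a brute-force linear sweep.
 */
static int
hashalloc_order(int cg, int ncg)
{
	int i, ocg = cg;

	if (try_cg(cg))			/* 1: preferred cylinder group */
		return (cg);
	for (i = 1; i < ncg; i *= 2) {	/* 2: quadratic rehash */
		cg += i;
		if (cg >= ncg)
			cg -= ncg;
		printf("quadratic probe: cg %d\n", cg);
		if (try_cg(cg))
			return (cg);
	}
	cg = (ocg + 2) % ncg;		/* 3: brute-force sweep */
	for (i = 2; i < ncg; i++) {
		printf("linear probe: cg %d\n", cg);
		if (try_cg(cg))
			return (cg);
		cg = (cg + 1) % ncg;
	}
	return (-1);			/* no space anywhere */
}

int
main(void)
{
	hashalloc_order(3, 8);	/* preferred cg 3 of 8 */
	return (0);
}

The doubling step spreads retries quickly across the disk instead of clustering them next to a full group, which is why the real allocator only falls back to the linear sweep last.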
/*
 * Collect up the data into tape record sized buffers and output them.
 */
static void
ufs2_blksout(union dinode *dp, ufs2_daddr_t *blkp, int frags, ino_t ino,
    int last)
{
	ufs2_daddr_t *bp;
	int i, j, count, resid, blks, tbperdb, added;
	static int writingextdata = 0;

	/*
	 * Calculate the number of TP_BSIZE blocks to be dumped.
	 * For filesystems with a fragment size bigger than TP_BSIZE,
	 * only part of the final fragment may need to be dumped.
	 */
	blks = howmany(frags * sblock->fs_fsize, TP_BSIZE);
	if (last) {
		resid = howmany(fragoff(sblock, dp->dp2.di_size), TP_BSIZE);
		if (resid > 0)
			blks -= howmany(sblock->fs_fsize, TP_BSIZE) - resid;
	}
	tbperdb = sblock->fs_bsize >> tp_bshift;
	for (i = 0; i < blks; i += TP_NINDIR) {
		if (i + TP_NINDIR > blks)
			count = blks;
		else
			count = i + TP_NINDIR;
		for (j = i; j < count; j++)
			if (blkp[j / tbperdb] != 0)
				spcl.c_addr[j - i] = 1;
			else
				spcl.c_addr[j - i] = 0;
		spcl.c_count = count - i;
		if (last && count == blks && !writingextdata)
			added = appendextdata(dp);
		writeheader(ino);
		bp = &blkp[i / tbperdb];
		for (j = i; j < count; j += tbperdb, bp++)
			if (*bp != 0) {
				if (j + tbperdb <= count)
					dumpblock(*bp, (int)sblock->fs_bsize);
				else
					dumpblock(*bp, (count - j) * TP_BSIZE);
			}
		spcl.c_type = TS_ADDR;
		spcl.c_count = 0;
		if (last && count == blks && !writingextdata) {
			writingextdata = 1;
			writeextdata(dp, ino, added);
			writingextdata = 0;
		}
	}
}
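The block-count adjustment at the top of ufs2_blksout() is easy to check with concrete numbers. Here is a small standalone arithmetic sketch; the fragment size, fragment count, and tail length are made-up example values, TP_BSIZE is defined locally to dump's usual 1024-byte record size, and howmany() is the standard round-up division macro from <sys/param.h>:

#include <stdio.h>
#include <sys/param.h>

#ifndef howmany
#define howmany(x, y)	(((x) + ((y) - 1)) / (y))
#endif

#define TP_BSIZE	1024	/* tape record size used by dump */

int
main(void)
{
	long fs_fsize = 4096;	/* example fragment size */
	int frags = 4;		/* fragments in this chunk */
	long tail = 1500;	/* fragoff(): bytes of di_size in final frag */
	int blks, resid;

	blks = howmany(frags * fs_fsize, TP_BSIZE);	/* 16 records */
	resid = howmany(tail, TP_BSIZE);		/* 2 records used */
	if (resid > 0)
		blks -= howmany(fs_fsize, TP_BSIZE) - resid; /* 16-(4-2)=14 */
	printf("TP_BSIZE records to dump: %d\n", blks);
	return (0);
}

In words: the final fragment spans four tape records but only two of them hold file data, so the two unused trailing records are subtracted and 14 records are written instead of 16.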
/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
	struct cg *cgp;
	struct buf *bp;
	int32_t fragno, cgbno;
	int i, error, cg, blk, frags, bbase;
	struct fs *fs = ip->i_fs;
	const int needswap = UFS_FSNEEDSWAP(fs);

	if (size > fs->fs_bsize || fragoff(fs, size) != 0 ||
	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
		errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
		    (long long)bno, fs->fs_bsize, size);
	}
	cg = dtog(fs, bno);
	if (bno >= fs->fs_size) {
		warnx("bad block %lld, ino %ju", (long long)bno,
		    (uintmax_t)ip->i_number);
		return;
	}
	error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic_swap(cgp, needswap)) {
		brelse(bp);
		return;
	}
	cgbno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		fragno = fragstoblks(fs, cgbno);
		if (!ffs_isfreeblock(fs, cg_blksfree_swap(cgp, needswap),
		    fragno)) {
			errx(1, "blkfree: freeing free block %lld",
			    (long long)bno);
		}
		ffs_setblock(fs, cg_blksfree_swap(cgp, needswap), fragno);
		ffs_clusteracct(fs, cgp, fragno, 1);
		ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, -1, needswap);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree_swap(cgp, needswap), cgbno + i)) {
				errx(1,
				    "blkfree: freeing free frag: block %lld",
				    (long long)(cgbno + i));
			}
			setbit(cg_blksfree_swap(cgp, needswap), cgbno + i);
		}
		ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree_swap(cgp, needswap), bbase);
		ffs_fragacct_swap(fs, blk, cgp->cg_frsum, 1, needswap);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree_swap(cgp, needswap), fragno)) {
			ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, 1);
			ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
		}
	}
	fs->fs_fmod = 1;
	bdwrite(bp);
}
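The reassembly test at the end of ffs_blkfree() works because the cylinder-group free map is a plain bitmap with one bit per fragment: once every fragment bit of a block is set, the block is whole again. The following is a minimal standalone sketch of that idea for the common 8-fragments-per-block layout; isblock8(), FS_FRAG, and the toy map are made up for the example, and the real ffs_isblock() also handles 1, 2, and 4 fragments per block via fs_fragshift:

#include <stdio.h>
#include <string.h>

#define FS_FRAG		8	/* example: 8 fragments per block */
#define setbit(a, i)	((a)[(i) / 8] |= 1 << ((i) % 8))

/*
 * With 8 fragments per block, one block maps to exactly one byte of
 * the free map, so a whole block is free when that byte is all ones
 * (cf. the fs_fragshift == 3 case of ffs_isblock()).
 */
static int
isblock8(unsigned char *map, int blkno)
{
	return (map[blkno] == 0xff);
}

int
main(void)
{
	unsigned char map[2];	/* toy free map covering two blocks */
	int i;

	memset(map, 0, sizeof(map));
	for (i = 0; i < FS_FRAG - 1; i++)	/* free 7 of 8 fragments */
		setbit(map, i);
	printf("block 0 reassembled? %d\n", isblock8(map, 0));	/* 0 */
	setbit(map, FS_FRAG - 1);	/* free the last fragment */
	printf("block 0 reassembled? %d\n", isblock8(map, 0));	/* 1 */
	return (0);
}

This is why ffs_blkfree() first credits the freed fragments to cs_nffree and only afterward, if the bitmap shows a complete block, converts fs_frag fragments' worth of credit into a single cs_nbfree block.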