/**
 * ext4_free_blocks() -- Free given blocks and update quota
 * @handle:	handle for this transaction
 * @inode:	inode
 * @block:	start physical block to free
 * @count:	number of blocks to free
 * @metadata:	Are these metadata blocks
 */
void ext4_free_blocks(handle_t *handle, struct inode *inode,
			ext4_fsblk_t block, unsigned long count,
			int metadata)
{
	struct super_block *sb;
	unsigned long dquot_freed_blocks;

	/*
	 * This isn't the right place to decide whether the block is
	 * metadata; inode.c/extents.c knows better, but for safety ...
	 */
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		metadata = 1;

	/*
	 * We need to make sure we don't reuse the freed block until
	 * the transaction commits.  Writeback mode has weak data
	 * consistency, so don't force data to be treated as metadata
	 * when freeing blocks for writeback mode.
	 */
	if (metadata == 0 && !ext4_should_writeback_data(inode))
		metadata = 1;

	sb = inode->i_sb;

	ext4_mb_free_blocks(handle, inode, block, count,
			    metadata, &dquot_freed_blocks);
	if (dquot_freed_blocks)
		DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
	return;
}
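/*
 * Hypothetical caller sketch (not part of the original source): a helper
 * releasing a contiguous run of data blocks from inside an already started
 * transaction.  The helper name example_release_data_blocks and the
 * "pass metadata == 0 for data" convention are assumptions for illustration;
 * ext4_free_blocks() itself promotes metadata to 1 when the inode is not in
 * writeback mode, as the comment above explains.
 */
static void example_release_data_blocks(handle_t *handle, struct inode *inode,
					ext4_fsblk_t first, unsigned long nr)
{
	/* Data blocks: pass metadata == 0 and let ext4_free_blocks() decide. */
	ext4_free_blocks(handle, inode, first, nr, 0);
}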
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 (0 is checked in step 1, 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			   "internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		DQUOT_FREE_BLOCK(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
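/*
 * UFS_TEST_FREE_SPACE_CG, used three times above, is a macro defined
 * elsewhere in fs/ufs/balloc.c.  The sketch below is an approximation from
 * memory, assuming the in-core cylinder-group buffer is reachable via
 * UFS_SB(sb)->s_ucg; check the actual source for the exact definition.
 * Its job: jump to cg_found as soon as cylinder group 'cgno' has either a
 * whole free block or a free fragment run large enough for 'count' fragments.
 */
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;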
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super(sb);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	DQUOT_FREE_BLOCK (inode, count);

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	mark_sb_dirty(sb);
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return;
}
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	mark_sb_dirty(sb);
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
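/*
 * Hypothetical usage sketch (not part of the original source) tying the two
 * routines together: a caller freeing a block-aligned run of fragments would
 * hand the whole-block portion to ufs_free_blocks() and the unaligned tail to
 * ufs_free_fragments(), since ufs_free_blocks() requires both 'fragment' and
 * 'count' to be multiples of the fragments-per-block value.  The helper name,
 * the uspi parameter, and the assumption that 'fragment' is block-aligned are
 * all for illustration only.
 */
static void example_free_run(struct inode *inode,
			     struct ufs_sb_private_info *uspi,
			     u64 fragment, unsigned count)
{
	unsigned tail = count & uspi->s_fpbmask;	/* trailing partial block */
	unsigned whole = count - tail;			/* whole-block portion    */

	if (whole)
		ufs_free_blocks(inode, fragment, whole);
	if (tail)
		ufs_free_fragments(inode, fragment + whole, tail);
}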
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
	unsigned swab;

	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	swab = sb->u.ufs_sb.s_swab;
	usb1 = ubh_get_usb_first(USPI_UBH);

	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super(sb);

	cgno = ufs_dtog(fragment);
	bit = ufs_dtogd(fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic (ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	DQUOT_FREE_BLOCK (sb, inode, count);

	ADD_SWAB32(ucg->cg_cs.cs_nffree, count);
	ADD_SWAB32(usb1->fs_cstotal.cs_nffree, count);
	ADD_SWAB32(sb->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
		SUB_SWAB32(ucg->cg_cs.cs_nffree, uspi->s_fpb);
		SUB_SWAB32(usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
		SUB_SWAB32(sb->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		INC_SWAB32(ucg->cg_cs.cs_nbfree);
		INC_SWAB32(usb1->fs_cstotal.cs_nbfree);
		INC_SWAB32(sb->fs_cs(cgno).cs_nbfree);
		cylno = ufs_cbtocylno (bbase);
		INC_SWAB16(ubh_cg_blks (ucpi, cylno, ufs_cbtorpos(bbase)));
		INC_SWAB32(ubh_cg_blktot (ucpi, cylno));
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD(("EXIT\n"))
	return;

failed:
	unlock_super (sb);
	UFSD(("EXIT (FAILED)\n"))
	return;
}
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
	unsigned swab;

	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	swab = sb->u.ufs_sb.s_swab;
	usb1 = ubh_get_usb_first(USPI_UBH);

	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %u, count %u\n", fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog (fragment);
	bit = ufs_dtogd (fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic (ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
		if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(sb, inode, uspi->s_fpb);

		INC_SWAB32(ucg->cg_cs.cs_nbfree);
		INC_SWAB32(usb1->fs_cstotal.cs_nbfree);
		INC_SWAB32(sb->fs_cs(cgno).cs_nbfree);

		cylno = ufs_cbtocylno(i);
		INC_SWAB16(ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)));
		INC_SWAB32(ubh_cg_blktot(ucpi, cylno));
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD(("EXIT\n"))
	return;

failed:
	unlock_super (sb);
	UFSD(("EXIT (FAILED)\n"))
	return;
}