int ufs_check_dir_entry (const char * function, struct inode * dir,
	struct ufs_dir_entry * de, struct buffer_head * bh,
	unsigned long offset)
{
	struct super_block * sb;
	const char * error_msg;
	unsigned flags, swab;

	sb = dir->i_sb;
	flags = sb->u.ufs_sb.s_flags;
	swab = sb->u.ufs_sb.s_swab;
	error_msg = NULL;

	if (SWAB16(de->d_reclen) < UFS_DIR_REC_LEN(1))
		error_msg = "reclen is smaller than minimal";
	else if (SWAB16(de->d_reclen) % 4 != 0)
		error_msg = "reclen % 4 != 0";
	else if (SWAB16(de->d_reclen) < UFS_DIR_REC_LEN(ufs_get_de_namlen(de)))
		error_msg = "reclen is too small for namlen";
	else if (dir && ((char *) de - bh->b_data) + SWAB16(de->d_reclen) >
		 dir->i_sb->s_blocksize)
		error_msg = "directory entry across blocks";
	else if (dir && SWAB32(de->d_ino) > (sb->u.ufs_sb.s_uspi->s_ipg *
					     sb->u.ufs_sb.s_uspi->s_ncg))
		error_msg = "inode out of bounds";

	if (error_msg != NULL)
		ufs_error (sb, function, "bad entry in directory #%lu, size %lu: %s - "
			   "offset=%lu, inode=%lu, reclen=%d, namlen=%d",
			   dir->i_ino, dir->i_size, error_msg, offset,
			   (unsigned long) SWAB32(de->d_ino),
			   SWAB16(de->d_reclen), ufs_get_de_namlen(de));

	return (error_msg == NULL ? 1 : 0);
}
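/*
 * Illustrative sketch (not part of the original source): a directory-block
 * walk of the kind ufs_readdir() performs, showing why the d_reclen checks
 * above matter. Each entry's reclen is the offset to the next entry, so a
 * reclen of 0, one not a multiple of 4, or one running past the block would
 * make an unchecked loop spin forever or read out of bounds. The helper
 * name ufs_walk_dir_block is hypothetical; SWAB16 and the local `swab`
 * variable follow the same era's conventions as the function above.
 */
static int ufs_walk_dir_block(const char *caller, struct inode *dir,
	struct buffer_head *bh)
{
	unsigned swab = dir->i_sb->u.ufs_sb.s_swab;	/* used by SWAB16() */
	unsigned long offset = 0;
	struct ufs_dir_entry *de = (struct ufs_dir_entry *) bh->b_data;

	while (offset < dir->i_sb->s_blocksize) {
		/* validate before trusting reclen to advance the cursor */
		if (!ufs_check_dir_entry (caller, dir, de, bh, offset))
			return 0;	/* corrupt entry: stop the scan */
		/* ... process the entry (d_ino, name) here ... */
		offset += SWAB16(de->d_reclen);
		de = (struct ufs_dir_entry *) ((char *) de + SWAB16(de->d_reclen));
	}
	return 1;
}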
unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment,
	unsigned goal, unsigned count, int * err )
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct buffer_head * bh;
	unsigned cgno, oldcount, newcount, tmp, request, i, result;
	unsigned swab;

	UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n",
	      inode->i_ino, fragment, goal, count))

	sb = inode->i_sb;
	swab = sb->u.ufs_sb.s_swab;
	uspi = sb->u.ufs_sb.s_uspi;
	usb1 = ubh_get_usb_first(USPI_UBH);
	*err = -ENOSPC;

	lock_super (sb);

	tmp = SWAB32(*p);
	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning (sb, "ufs_new_fragments", "internal warning"
			" fragment %u, count %u", fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error (sb, "ufs_new_fragments", "internal error, "
				"fragment %u, tmp %u\n", fragment, tmp);
			unlock_super (sb);	/* was missing: don't leak the superblock lock */
			return (unsigned)-1;
		}
		if (fragment < inode->u.ufs_i.i_lastfrag) {
			UFSD(("EXIT (ALREADY ALLOCATED)\n"))
			unlock_super (sb);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD(("EXIT (ALREADY ALLOCATED)\n"))
			unlock_super(sb);
			return 0;
		}
	}

	/*
	 * There is not enough space for user on the device
	 */
	if (!fsuser() && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
		unlock_super (sb);
		UFSD(("EXIT (FAILED)\n"))
		return 0;
	}
/*
 * Read cylinder group into cache. The memory space for ufs_cg_private_info
 * structure is already allocated during ufs_read_super.
 */
static void ufs_read_cylinder (struct super_block * sb,
	unsigned cgno, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i, j;

	UFSD("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr);
	uspi = sbi->s_uspi;
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;

	UCPI_UBH(ucpi)->fragment = ufs_cgcmin(cgno);
	UCPI_UBH(ucpi)->count = uspi->s_cgsize >> sb->s_blocksize_bits;
	/*
	 * We have already the first fragment of cylinder group block in buffer
	 */
	UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno];
	for (i = 1; i < UCPI_UBH(ucpi)->count; i++)
		if (!(UCPI_UBH(ucpi)->bh[i] = sb_bread(sb, UCPI_UBH(ucpi)->fragment + i)))
			goto failed;
	sbi->s_cgno[bitmap_nr] = cgno;

	ucpi->c_cgx	= fs32_to_cpu(sb, ucg->cg_cgx);
	ucpi->c_ncyl	= fs16_to_cpu(sb, ucg->cg_ncyl);
	ucpi->c_niblk	= fs16_to_cpu(sb, ucg->cg_niblk);
	ucpi->c_ndblk	= fs32_to_cpu(sb, ucg->cg_ndblk);
	ucpi->c_rotor	= fs32_to_cpu(sb, ucg->cg_rotor);
	ucpi->c_frotor	= fs32_to_cpu(sb, ucg->cg_frotor);
	ucpi->c_irotor	= fs32_to_cpu(sb, ucg->cg_irotor);
	ucpi->c_btotoff	= fs32_to_cpu(sb, ucg->cg_btotoff);
	ucpi->c_boff	= fs32_to_cpu(sb, ucg->cg_boff);
	ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff);
	ucpi->c_freeoff	= fs32_to_cpu(sb, ucg->cg_freeoff);
	ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff);
	ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff);
	ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff);
	ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks);
	UFSD("EXIT\n");
	return;

failed:
	/* release the buffers read in the loop above, not the unrelated
	 * cached first fragments in sbi->s_ucg[] */
	for (j = 1; j < i; j++)
		brelse (UCPI_UBH(ucpi)->bh[j]);
	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno);
}
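/*
 * Usage sketch (inferred from the code above, not a statement from the
 * source): callers go through ufs_load_cylinder(), which picks a cache
 * slot `bitmap_nr` and invokes ufs_read_cylinder() only on a miss.
 * sbi->s_cgno[bitmap_nr] records which group occupies each slot, and
 * UFS_CGNO_EMPTY marks a slot that holds nothing, which is why the error
 * path restores it before reporting failure.
 */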
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super(sb);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	DQUOT_FREE_BLOCK (inode, count);

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	mark_sb_dirty(sb);

	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return;
}
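/*
 * Worked example of the reassembly step above (illustrative numbers): with
 * uspi->s_fpb == 8, freeing the last busy fragment of a block leaves all 8
 * of that block's bits set in the free map, so ubh_isblockset() succeeds.
 * The accounting then migrates 8 fragment credits (cs_nffree -= 8) into one
 * whole-block credit (cs_nbfree += 1), keeping the free-space summaries
 * consistent at both granularities.
 */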
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
		      u64 goal, unsigned count, int *err,
		      struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	lock_super (sb);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			unlock_super(sb);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super (sb);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super(sb);
			return 0;
		}
	}

	/*
	 * There is not enough space for user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		unlock_super (sb);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_cpu_to_data_ptr(sb, p, result);
			*err = 0;
			UFS_I(inode)->i_lastfrag =
				max_t(u32, UFS_I(inode)->i_lastfrag,
				      fragment + count);
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
		}
		unlock_super(sb);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag,
						 fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		unlock_super(sb);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		/* fall through */
	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		ufs_cpu_to_data_ptr(sb, p, result);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag,
						 fragment + count);
		unlock_super(sb);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount,
					    request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	unlock_super(sb);
	UFSD("EXIT (FAILED)\n");
	return 0;
}
/*
 * Modify inode page cache in such way:
 * have - blocks with b_blocknr equal to oldb...oldb+count-1
 * get - blocks with b_blocknr equal to newb...newb+count-1
 * also we suppose that oldb...oldb+count-1 blocks
 * situated at the end of file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write,
 * locked_page is argument of these functions, so we already lock it.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	     inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)	/* it was truncated */
				continue;
			if (IS_ERR(page)) {	/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(READ, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);
			bh->b_blocknr = newb + pos;
			unmap_underlying_metadata(bh->b_bdev,
						  bh->b_blocknr);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
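/*
 * Worked example of the index arithmetic above (illustrative, assuming
 * 4 KiB pages and 1 KiB file-system blocks): PAGE_CACHE_SHIFT - i_blkbits
 * is 2, so blks_per_page = 4 and mask = 3. Logical block i then lives on
 * page i >> 2 at buffer position i & 3, and the loop step
 * `i = (i | mask) + 1` rounds i up to the first block of the next page,
 * so each page's buffer ring is visited exactly once.
 */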
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	mark_sb_dirty(sb);
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
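/*
 * Illustrative numbers for the do_more/overflow path above: with
 * uspi->s_fpg == 32768, freeing 16 fragments starting at an in-group bit
 * of 32760 gives end_bit = 32776 > s_fpg. The code then frees only the 8
 * fragments that belong to this cylinder group, advances `fragment` by
 * those 8, restores `count` from `overflow`, and loops via do_more to
 * free the remaining 8 in the next group.
 */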
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ufs_free_inode (struct inode * inode)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	int is_directory;
	unsigned ino, cg, bit;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	ino = inode->i_ino;

	lock_super (sb);

	if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
		ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
		unlock_super (sb);
		return;
	}

	cg = ufs_inotocg (ino);
	bit = ufs_inotocgoff (ino);
	ucpi = ufs_load_cylinder (sb, cg);
	if (!ucpi) {
		unlock_super (sb);
		return;
	}
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad cg magic number");

	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	is_directory = S_ISDIR(inode->i_mode);

	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	clear_inode (inode);

	if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
		ufs_error(sb, "ufs_free_inode", "bit already cleared for inode %u", ino);
	else {
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
		if (ino < ucpi->c_irotor)
			ucpi->c_irotor = ino;
		fs32_add(sb, &ucg->cg_cs.cs_nifree, 1);
		uspi->cs_total.cs_nifree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1);

		if (is_directory) {
			fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1);
			uspi->cs_total.cs_ndir--;
			fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD("EXIT\n");
}
/*
 * There are two policies for allocating an inode. If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ufs_new_inode(struct inode * dir, int mode)
{
	struct super_block * sb;
	struct ufs_sb_info * sbi;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	struct inode * inode;
	unsigned cg, bit, i, j, start;
	struct ufs_inode_info *ufsi;
	int err = -ENOSPC;

	UFSD("ENTER\n");

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);
	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ufsi = UFS_I(inode);
	sbi = UFS_SB(sb);
	uspi = sbi->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	lock_super (sb);

	/*
	 * Try to place the inode in its parent directory
	 */
	i = ufs_inotocg(dir->i_ino);
	if (sbi->fs_cs(i).cs_nifree) {
		cg = i;
		goto cg_found;
	}

	/*
	 * Use a quadratic hash to find a group with a free inode
	 */
	for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
		i += j;
		if (i >= uspi->s_ncg)
			i -= uspi->s_ncg;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}

	/*
	 * That failed: try linear search for a free inode
	 */
	i = ufs_inotocg(dir->i_ino) + 1;
	for (j = 2; j < uspi->s_ncg; j++) {
		i++;
		if (i >= uspi->s_ncg)
			i = 0;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}

	goto failed;

cg_found:
	ucpi = ufs_load_cylinder (sb, cg);
	if (!ucpi) {
		err = -EIO;
		goto failed;
	}
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");

	start = ucpi->c_irotor;
	bit = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, uspi->s_ipg, start);
	if (!(bit < uspi->s_ipg)) {
		bit = ubh_find_first_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, start);
		if (!(bit < start)) {
			ufs_error (sb, "ufs_new_inode",
				   "cylinder group %u corrupted - error in inode bitmap\n", cg);
			err = -EIO;
			goto failed;
		}
	}
	UFSD("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg);
	if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
		ubh_setbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
	else {
		ufs_panic (sb, "ufs_new_inode", "internal error");
		err = -EIO;
		goto failed;
	}

	if (uspi->fs_magic == UFS2_MAGIC) {
		u32 initediblk = fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_initediblk);

		if (bit + uspi->s_inopb > initediblk &&
		    initediblk < fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_niblk))
			ufs2_init_inodes_chunk(sb, ucpi, ucg);
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
	uspi->cs_total.cs_nifree--;
	fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);

	if (S_ISDIR(mode)) {
		fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
		uspi->cs_total.cs_ndir++;
		fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
	}
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	inode->i_ino = cg * uspi->s_ipg + bit;
	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;

	inode->i_blocks = 0;
	inode->i_generation = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	ufsi->i_flags = UFS_I(dir)->i_flags;
	ufsi->i_lastfrag = 0;
	ufsi->i_shadow = 0;
	ufsi->i_osync = 0;
	ufsi->i_oeftflag = 0;
	ufsi->i_dir_start_lookup = 0;
	memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));
	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	if (uspi->fs_magic == UFS2_MAGIC) {
		struct buffer_head *bh;
		struct ufs2_inode *ufs2_inode;

		/*
		 * setup birth date, we do it here because of there is no sense
		 * to hold it in struct ufs_inode_info, and lose 64 bit
		 */
		bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
		if (!bh) {
			ufs_warning(sb, "ufs_read_inode",
				    "unable to read inode %lu\n",
				    inode->i_ino);
			err = -EIO;
			goto fail_remove_inode;
		}
		lock_buffer(bh);
		ufs2_inode = (struct ufs2_inode *)bh->b_data;
		ufs2_inode += ufs_inotofsbo(inode->i_ino);
		ufs2_inode->ui_birthtime = cpu_to_fs64(sb, CURRENT_TIME.tv_sec);
		ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, CURRENT_TIME.tv_nsec);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (sb->s_flags & MS_SYNCHRONOUS)
			sync_dirty_buffer(bh);
		brelse(bh);
	}

	unlock_super (sb);

	if (DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		err = -EDQUOT;
		goto fail_without_unlock;
	}

	UFSD("allocating inode %lu\n", inode->i_ino);
	UFSD("EXIT\n");
	return inode;

fail_remove_inode:
	unlock_super(sb);
fail_without_unlock:
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	iput(inode);
	UFSD("EXIT (FAILED): err %d\n", err);
	return ERR_PTR(err);
failed:
	unlock_super (sb);
	make_bad_inode(inode);
	iput (inode);
	UFSD("EXIT (FAILED): err %d\n", err);
	return ERR_PTR(err);
}
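/*
 * Probe order of the group searches above (illustrative): with i set to
 * the parent's group, the quadratic hash visits i+1, i+3, i+7, i+15, ...
 * (mod s_ncg), since j doubles on every step; if that misses, the linear
 * fallback scans the remaining groups one by one.
 */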
/*
 * There are two policies for allocating an inode. If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ufs_new_inode(struct inode * dir, int mode)
{
	struct super_block * sb;
	struct ufs_sb_info * sbi;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	struct inode * inode;
	unsigned cg, bit, i, j, start;
	struct ufs_inode_info *ufsi;

	UFSD(("ENTER\n"))

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);
	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ufsi = UFS_I(inode);
	sbi = UFS_SB(sb);
	uspi = sbi->s_uspi;
	usb1 = ubh_get_usb_first(USPI_UBH);

	lock_super (sb);

	/*
	 * Try to place the inode in its parent directory
	 */
	i = ufs_inotocg(dir->i_ino);
	if (sbi->fs_cs(i).cs_nifree) {
		cg = i;
		goto cg_found;
	}

	/*
	 * Use a quadratic hash to find a group with a free inode
	 */
	for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
		i += j;
		if (i >= uspi->s_ncg)
			i -= uspi->s_ncg;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}

	/*
	 * That failed: try linear search for a free inode
	 */
	i = ufs_inotocg(dir->i_ino) + 1;
	for (j = 2; j < uspi->s_ncg; j++) {
		i++;
		if (i >= uspi->s_ncg)
			i = 0;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}

	goto failed;

cg_found:
	ucpi = ufs_load_cylinder (sb, cg);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg(UCPI_UBH);
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");

	start = ucpi->c_irotor;
	bit = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_iusedoff, uspi->s_ipg, start);
	if (!(bit < uspi->s_ipg)) {
		bit = ubh_find_first_zero_bit (UCPI_UBH, ucpi->c_iusedoff, start);
		if (!(bit < start)) {
			ufs_error (sb, "ufs_new_inode",
				   "cylinder group %u corrupted - error in inode bitmap\n", cg);
			goto failed;
		}
	}
	UFSD(("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg))
	if (ubh_isclr (UCPI_UBH, ucpi->c_iusedoff, bit))
		ubh_setbit (UCPI_UBH, ucpi->c_iusedoff, bit);
	else {
		ufs_panic (sb, "ufs_new_inode", "internal error");
		goto failed;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
	fs32_sub(sb, &usb1->fs_cstotal.cs_nifree, 1);
	fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
	if (S_ISDIR(mode)) {
		fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
		fs32_add(sb, &usb1->fs_cstotal.cs_ndir, 1);
		fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_wait_on_buffer (UCPI_UBH);
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;

	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;

	inode->i_ino = cg * uspi->s_ipg + bit;
	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	ufsi->i_flags = UFS_I(dir)->i_flags;
	ufsi->i_lastfrag = 0;
	ufsi->i_gen = 0;
	ufsi->i_shadow = 0;
	ufsi->i_osync = 0;
	ufsi->i_oeftflag = 0;
	memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	unlock_super (sb);

	if (DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}

	UFSD(("allocating inode %lu\n", inode->i_ino))
	UFSD(("EXIT\n"))
	return inode;

failed:
	unlock_super (sb);
	make_bad_inode(inode);
	iput (inode);
	UFSD(("EXIT (FAILED)\n"))
	return ERR_PTR(-ENOSPC);
}
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
	unsigned swab;

	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	swab = sb->u.ufs_sb.s_swab;
	usb1 = ubh_get_usb_first(USPI_UBH);

	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super(sb);

	cgno = ufs_dtog(fragment);
	bit = ufs_dtogd(fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic (ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	DQUOT_FREE_BLOCK (sb, inode, count);
	ADD_SWAB32(ucg->cg_cs.cs_nffree, count);
	ADD_SWAB32(usb1->fs_cstotal.cs_nffree, count);
	ADD_SWAB32(sb->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
		SUB_SWAB32(ucg->cg_cs.cs_nffree, uspi->s_fpb);
		SUB_SWAB32(usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
		SUB_SWAB32(sb->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		INC_SWAB32(ucg->cg_cs.cs_nbfree);
		INC_SWAB32(usb1->fs_cstotal.cs_nbfree);
		INC_SWAB32(sb->fs_cs(cgno).cs_nbfree);
		cylno = ufs_cbtocylno (bbase);
		INC_SWAB16(ubh_cg_blks (ucpi, cylno, ufs_cbtorpos(bbase)));
		INC_SWAB32(ubh_cg_blktot (ucpi, cylno));
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;

	unlock_super (sb);
	UFSD(("EXIT\n"))
	return;

failed:
	unlock_super (sb);
	UFSD(("EXIT (FAILED)\n"))
	return;
}
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
	unsigned swab;

	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	swab = sb->u.ufs_sb.s_swab;
	usb1 = ubh_get_usb_first(USPI_UBH);

	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %u, count %u\n", fragment, count);
		goto failed;	/* superblock not locked yet: must not unlock it */
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog (fragment);
	bit = ufs_dtogd (fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic (ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
		if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(sb, inode, uspi->s_fpb);
		INC_SWAB32(ucg->cg_cs.cs_nbfree);
		INC_SWAB32(usb1->fs_cstotal.cs_nbfree);
		INC_SWAB32(sb->fs_cs(cgno).cs_nbfree);
		cylno = ufs_cbtocylno(i);
		INC_SWAB16(ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)));
		INC_SWAB32(ubh_cg_blktot(ucpi, cylno));
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD(("EXIT\n"))
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD(("EXIT (FAILED)\n"))
	return;
}