/*
 * Nullify a new chunk of inodes.
 * BSD also sets the ui_gen field of the inode during nullification,
 * but we do not care about that, because Linux UFS does not support NFS.
 */
static void ufs2_init_inodes_chunk(struct super_block *sb,
				   struct ufs_cg_private_info *ucpi,
				   struct ufs_cylinder_group *ucg)
{
	struct buffer_head *bh;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	sector_t beg = uspi->s_sbbase +
		ufs_inotofsba(ucpi->c_cgx * uspi->s_ipg +
			      fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_initediblk));
	sector_t end = beg + uspi->s_fpb;

	UFSD("ENTER cgno %d\n", ucpi->c_cgx);

	for (; beg < end; ++beg) {
		bh = sb_getblk(sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (sb->s_flags & MS_SYNCHRONOUS)
			sync_dirty_buffer(bh);
		brelse(bh);
	}

	fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb);
	ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer(UCPI_UBH(ucpi));
	}

	UFSD("EXIT\n");
}
void ufs_error (struct super_block * sb, const char * function,
	const char * fmt, ...)
{
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	va_list args;

	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	if (!(sb->s_flags & MS_RDONLY)) {
		usb1->fs_clean = UFS_FSBAD;
		ubh_mark_buffer_dirty(USPI_UBH(uspi));
		sb->s_dirt = 1;
		sb->s_flags |= MS_RDONLY;
	}
	va_start (args, fmt);
	vsnprintf (error_buf, sizeof(error_buf), fmt, args);
	va_end (args);
	switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) {
	case UFS_MOUNT_ONERROR_PANIC:
		panic ("UFS-fs panic (device %s): %s: %s\n",
			sb->s_id, function, error_buf);

	case UFS_MOUNT_ONERROR_LOCK:
	case UFS_MOUNT_ONERROR_UMOUNT:
	case UFS_MOUNT_ONERROR_REPAIR:
		printk (KERN_CRIT "UFS-fs error (device %s): %s: %s\n",
			sb->s_id, function, error_buf);
	}
}
void ufs_error (struct super_block * sb, const char * function,
	const char * fmt, ...)
{
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct va_format vaf;
	va_list args;

	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	if (!(sb->s_flags & MS_RDONLY)) {
		usb1->fs_clean = UFS_FSBAD;
		ubh_mark_buffer_dirty(USPI_UBH(uspi));
		ufs_mark_sb_dirty(sb);
		sb->s_flags |= MS_RDONLY;
	}
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	switch (UFS_SB(sb)->s_mount_opt & UFS_MOUNT_ONERROR) {
	case UFS_MOUNT_ONERROR_PANIC:
		panic("panic (device %s): %s: %pV\n",
		      sb->s_id, function, &vaf);

	case UFS_MOUNT_ONERROR_LOCK:
	case UFS_MOUNT_ONERROR_UMOUNT:
	case UFS_MOUNT_ONERROR_REPAIR:
		pr_crit("error (device %s): %s: %pV\n",
			sb->s_id, function, &vaf);
	}
	va_end(args);
}
static int ufs_trunc_tindirect (struct inode * inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_buffer_head * tind_bh;
	unsigned tindirect_block, tmp, i;
	__fs32 * tind, * p;
	int retry;

	UFSD("ENTER\n");

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	retry = 0;

	tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb))
		? ((DIRECT_BLOCK - UFS_NDADDR - uspi->s_apb - uspi->s_2apb) >> uspi->s_2apbshift) : 0;
	p = ufsi->i_u1.i_data + UFS_TIND_BLOCK;
	if (!(tmp = fs32_to_cpu(sb, *p)))
		return 0;
	tind_bh = ubh_bread (sb, tmp, uspi->s_bsize);
	if (tmp != fs32_to_cpu(sb, *p)) {
		ubh_brelse (tind_bh);
		return 1;
	}
	if (!tind_bh) {
		*p = 0;
		return 0;
	}

	for (i = tindirect_block ; i < uspi->s_apb ; i++) {
		tind = ubh_get_addr32 (tind_bh, i);
		retry |= ufs_trunc_dindirect(inode, UFS_NDADDR +
			uspi->s_apb + ((i + 1) << uspi->s_2apbshift), tind);
		ubh_mark_buffer_dirty(tind_bh);
	}

	for (i = 0; i < uspi->s_apb; i++)
		if (*ubh_get_addr32 (tind_bh, i))
			break;
	if (i >= uspi->s_apb) {
		tmp = fs32_to_cpu(sb, *p);
		*p = 0;

		ufs_free_blocks(inode, tmp, uspi->s_fpb);
		mark_inode_dirty(inode);
		ubh_bforget(tind_bh);
		tind_bh = NULL;
	}
	if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
		ubh_ll_rw_block(SWRITE, tind_bh);
		ubh_wait_on_buffer (tind_bh);
	}
	ubh_brelse (tind_bh);

	UFSD("EXIT\n");
	return retry;
}
static int ufs_trunc_tindirect(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct ufs_buffer_head * tind_bh;
	u64 tindirect_block, tmp, i;
	void *tind, *p;
	int retry;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	retry = 0;

	tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb))
		? ((DIRECT_BLOCK - UFS_NDADDR - uspi->s_apb - uspi->s_2apb) >> uspi->s_2apbshift) : 0;

	p = ufs_get_direct_data_ptr(uspi, ufsi, UFS_TIND_BLOCK);
	if (!(tmp = ufs_data_ptr_to_cpu(sb, p)))
		return 0;
	tind_bh = ubh_bread (sb, tmp, uspi->s_bsize);
	if (tmp != ufs_data_ptr_to_cpu(sb, p)) {
		ubh_brelse (tind_bh);
		return 1;
	}
	if (!tind_bh) {
		ufs_data_ptr_clear(uspi, p);
		return 0;
	}

	for (i = tindirect_block ; i < uspi->s_apb ; i++) {
		tind = ubh_get_data_ptr(uspi, tind_bh, i);
		retry |= ufs_trunc_dindirect(inode, UFS_NDADDR +
			uspi->s_apb + ((i + 1) << uspi->s_2apbshift), tind);
		ubh_mark_buffer_dirty(tind_bh);
	}

	for (i = 0; i < uspi->s_apb; i++)
		if (!ufs_is_data_ptr_zero(uspi,
					  ubh_get_data_ptr(uspi, tind_bh, i)))
			break;
	if (i >= uspi->s_apb) {
		tmp = ufs_data_ptr_to_cpu(sb, p);
		ufs_data_ptr_clear(uspi, p);

		ufs_free_blocks(inode, tmp, uspi->s_fpb);
		mark_inode_dirty(inode);
		ubh_bforget(tind_bh);
		tind_bh = NULL;
	}
	if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
		ubh_ll_rw_block(SWRITE, tind_bh);
		ubh_wait_on_buffer (tind_bh);
	}
	ubh_brelse (tind_bh);

	UFSD("EXIT: ino %lu\n", inode->i_ino);
	return retry;
}
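The offset passed down to ufs_trunc_dindirect above follows from the file-block geometry: with s_apb address slots per indirect block, the direct pointers cover the first UFS_NDADDR file blocks, the single indirect the next s_apb, the double indirect the next s_apb squared, and the triple indirect the next s_apb cubed. A minimal userspace sketch of those boundaries; the UFS_NDADDR value and the apb parameter are illustrative constants, not values read from a real superblock:

#include <stdio.h>

#define UFS_NDADDR 12	/* direct pointers in the inode (illustrative) */

int main(void)
{
	unsigned long long apb = 2048;	/* address slots per indirect block (illustrative) */
	unsigned long long direct_end = UFS_NDADDR;
	unsigned long long ind_end    = direct_end + apb;
	unsigned long long dind_end   = ind_end + apb * apb;
	unsigned long long tind_end   = dind_end + apb * apb * apb;

	printf("direct:          blocks [0, %llu)\n", direct_end);
	printf("single indirect: blocks [%llu, %llu)\n", direct_end, ind_end);
	printf("double indirect: blocks [%llu, %llu)\n", ind_end, dind_end);
	printf("triple indirect: blocks [%llu, %llu)\n", dind_end, tind_end);
	return 0;
}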
/**
 * ufs_put_super_internal() - put on-disk internal structures
 * @sb: pointer to super_block structure
 *
 * Put on-disk structures associated with cylinder groups
 * and write them back to disk, also update cs_total on disk.
 */
static void ufs_put_super_internal(struct super_block *sb)
{
	struct ufs_sb_info *sbi = UFS_SB(sb);
	struct ufs_sb_private_info *uspi = sbi->s_uspi;
	struct ufs_buffer_head * ubh;
	unsigned char * base, * space;
	unsigned blks, size, i;

	UFSD("ENTER\n");

	lock_kernel();

	ufs_put_cstotal(sb);
	size = uspi->s_cssize;
	blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
	base = space = (char*) sbi->s_csp;

	for (i = 0; i < blks; i += uspi->s_fpb) {
		size = uspi->s_bsize;
		if (i + uspi->s_fpb > blks)
			size = (blks - i) * uspi->s_fsize;

		ubh = ubh_bread(sb, uspi->s_csaddr + i, size);

		ubh_memcpyubh (ubh, space, size);
		space += size;
		ubh_mark_buffer_uptodate (ubh, 1);
		ubh_mark_buffer_dirty (ubh);
		ubh_brelse (ubh);
	}
	for (i = 0; i < sbi->s_cg_loaded; i++) {
		ufs_put_cylinder (sb, i);
		kfree (sbi->s_ucpi[i]);
	}
	for (; i < UFS_MAX_GROUP_LOADED; i++)
		kfree (sbi->s_ucpi[i]);
	for (i = 0; i < uspi->s_ncg; i++)
		brelse (sbi->s_ucg[i]);
	kfree (sbi->s_ucg);
	kfree (base);

	unlock_kernel();

	UFSD("EXIT\n");
}
/*
 * Sync our internal copy of fs_cstotal with disk
 */
static void ufs_put_cstotal(struct super_block *sb)
{
	unsigned mtype = UFS_SB(sb)->s_mount_opt & UFS_MOUNT_UFSTYPE;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_super_block_second *usb2;
	struct ufs_super_block_third *usb3;

	UFSD("ENTER\n");

	usb1 = ubh_get_usb_first(uspi);
	usb2 = ubh_get_usb_second(uspi);
	usb3 = ubh_get_usb_third(uspi);

	if ((mtype == UFS_MOUNT_UFSTYPE_44BSD &&
	     (usb1->fs_flags & UFS_FLAGS_UPDATED)) ||
	    mtype == UFS_MOUNT_UFSTYPE_UFS2) {
		/* the statistics live in a different place than usual */
		usb2->fs_un.fs_u2.cs_ndir =
			cpu_to_fs64(sb, uspi->cs_total.cs_ndir);
		usb2->fs_un.fs_u2.cs_nbfree =
			cpu_to_fs64(sb, uspi->cs_total.cs_nbfree);
		usb3->fs_un1.fs_u2.cs_nifree =
			cpu_to_fs64(sb, uspi->cs_total.cs_nifree);
		usb3->fs_un1.fs_u2.cs_nffree =
			cpu_to_fs64(sb, uspi->cs_total.cs_nffree);
	} else {
		usb1->fs_cstotal.cs_ndir =
			cpu_to_fs32(sb, uspi->cs_total.cs_ndir);
		usb1->fs_cstotal.cs_nbfree =
			cpu_to_fs32(sb, uspi->cs_total.cs_nbfree);
		usb1->fs_cstotal.cs_nifree =
			cpu_to_fs32(sb, uspi->cs_total.cs_nifree);
		usb1->fs_cstotal.cs_nffree =
			cpu_to_fs32(sb, uspi->cs_total.cs_nffree);
	}
	ubh_mark_buffer_dirty(USPI_UBH(uspi));
	ufs_print_super_stuff(sb, usb1, usb2, usb3);
	UFSD("EXIT\n");
}
void ufs_panic (struct super_block * sb, const char * function,
	const char * fmt, ...)
{
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	va_list args;

	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	if (!(sb->s_flags & MS_RDONLY)) {
		usb1->fs_clean = UFS_FSBAD;
		ubh_mark_buffer_dirty(USPI_UBH(uspi));
		sb->s_dirt = 1;
	}
	va_start (args, fmt);
	vsnprintf (error_buf, sizeof(error_buf), fmt, args);
	va_end (args);
	sb->s_flags |= MS_RDONLY;
	printk (KERN_CRIT "UFS-fs panic (device %s): %s: %s\n",
		sb->s_id, function, error_buf);
}
/*
 * Remove a cylinder group from the cache. This does not release the
 * memory allocated for the cylinder group (that is done only in
 * ufs_put_super).
 */
void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i;

	UFSD("ENTER, bitmap_nr %u\n", bitmap_nr);

	uspi = sbi->s_uspi;
	if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
		UFSD("EXIT\n");
		return;
	}
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
		ufs_panic (sb, "ufs_put_cylinder", "internal error");
		return;
	}
	/*
	 * The rotors are not critical data, so we write them to disk
	 * only when we are done with the cylinder group.
	 */
	ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
	ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
	ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	for (i = 1; i < UCPI_UBH(ucpi)->count; i++) {
		brelse (UCPI_UBH(ucpi)->bh[i]);
	}

	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	UFSD("EXIT\n");
}
void ufs_panic (struct super_block * sb, const char * function,
	const char * fmt, ...)
{
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct va_format vaf;
	va_list args;

	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	if (!(sb->s_flags & MS_RDONLY)) {
		usb1->fs_clean = UFS_FSBAD;
		ubh_mark_buffer_dirty(USPI_UBH(uspi));
		ufs_mark_sb_dirty(sb);
	}
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	sb->s_flags |= MS_RDONLY;
	pr_crit("panic (device %s): %s: %pV\n",
		sb->s_id, function, &vaf);
	va_end(args);
}
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ufs_free_inode (struct inode * inode)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	int is_directory;
	unsigned ino, cg, bit;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	ino = inode->i_ino;

	lock_super (sb);

	if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
		ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
		unlock_super (sb);
		return;
	}

	cg = ufs_inotocg (ino);
	bit = ufs_inotocgoff (ino);
	ucpi = ufs_load_cylinder (sb, cg);
	if (!ucpi) {
		unlock_super (sb);
		return;
	}
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_free_inode", "internal error, bad cg magic number");

	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	is_directory = S_ISDIR(inode->i_mode);

	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	clear_inode (inode);

	if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
		ufs_error(sb, "ufs_free_inode", "bit already cleared for inode %u", ino);
	else {
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
		if (ino < ucpi->c_irotor)
			ucpi->c_irotor = ino;
		fs32_add(sb, &ucg->cg_cs.cs_nifree, 1);
		uspi->cs_total.cs_nifree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1);

		if (is_directory) {
			fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1);
			uspi->cs_total.cs_ndir--;
			fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1);
		}
	}
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD("EXIT\n");
}
static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
{
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_super_block_third * usb3;
	unsigned new_mount_opt, ufstype;
	unsigned flags;

	sync_filesystem(sb);
	mutex_lock(&UFS_SB(sb)->s_lock);
	uspi = UFS_SB(sb)->s_uspi;
	flags = UFS_SB(sb)->s_flags;
	usb1 = ubh_get_usb_first(uspi);
	usb3 = ubh_get_usb_third(uspi);

	/*
	 * Allow the "check" option to be passed as a remount option.
	 * It is not possible to change the ufstype option during remount.
	 */
	ufstype = UFS_SB(sb)->s_mount_opt & UFS_MOUNT_UFSTYPE;
	new_mount_opt = 0;
	ufs_set_opt (new_mount_opt, ONERROR_LOCK);
	if (!ufs_parse_options (data, &new_mount_opt)) {
		mutex_unlock(&UFS_SB(sb)->s_lock);
		return -EINVAL;
	}
	if (!(new_mount_opt & UFS_MOUNT_UFSTYPE)) {
		new_mount_opt |= ufstype;
	} else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
		pr_err("ufstype can't be changed during remount\n");
		mutex_unlock(&UFS_SB(sb)->s_lock);
		return -EINVAL;
	}

	if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
		UFS_SB(sb)->s_mount_opt = new_mount_opt;
		mutex_unlock(&UFS_SB(sb)->s_lock);
		return 0;
	}

	/*
	 * fs was mounted as rw, remounting ro
	 */
	if (*mount_flags & MS_RDONLY) {
		ufs_put_super_internal(sb);
		usb1->fs_time = cpu_to_fs32(sb, get_seconds());
		if ((flags & UFS_ST_MASK) == UFS_ST_SUN
		    || (flags & UFS_ST_MASK) == UFS_ST_SUNOS
		    || (flags & UFS_ST_MASK) == UFS_ST_SUNx86)
			ufs_set_fs_state(sb, usb1, usb3,
				UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
		ubh_mark_buffer_dirty (USPI_UBH(uspi));
		sb->s_flags |= MS_RDONLY;
	} else {
	/*
	 * fs was mounted as ro, remounting rw
	 */
#ifndef CONFIG_UFS_FS_WRITE
		pr_err("ufs was compiled with read-only support, can't be mounted as read-write\n");
		mutex_unlock(&UFS_SB(sb)->s_lock);
		return -EINVAL;
#else
		if (ufstype != UFS_MOUNT_UFSTYPE_SUN &&
		    ufstype != UFS_MOUNT_UFSTYPE_SUNOS &&
		    ufstype != UFS_MOUNT_UFSTYPE_44BSD &&
		    ufstype != UFS_MOUNT_UFSTYPE_SUNx86 &&
		    ufstype != UFS_MOUNT_UFSTYPE_UFS2) {
			pr_err("this ufstype is read-only supported\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return -EINVAL;
		}
		if (!ufs_read_cylinder_structures(sb)) {
			pr_err("failed during remounting\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return -EPERM;
		}
		sb->s_flags &= ~MS_RDONLY;
#endif
	}
	UFS_SB(sb)->s_mount_opt = new_mount_opt;
	mutex_unlock(&UFS_SB(sb)->s_lock);
	return 0;
}
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
	unsigned swab;

	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	swab = sb->u.ufs_sb.s_swab;
	usb1 = ubh_get_usb_first(USPI_UBH);

	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			"fragment %u, count %u\n", fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog (fragment);
	bit = ufs_dtogd (fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic (ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
		if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(sb, inode, uspi->s_fpb);

		INC_SWAB32(ucg->cg_cs.cs_nbfree);
		INC_SWAB32(usb1->fs_cstotal.cs_nbfree);
		INC_SWAB32(sb->fs_cs(cgno).cs_nbfree);
		cylno = ufs_cbtocylno(i);
		INC_SWAB16(ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)));
		INC_SWAB32(ubh_cg_blktot(ucpi, cylno));
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD(("EXIT\n"))
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD(("EXIT (FAILED)\n"))
	return;
}
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succeed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		DQUOT_FREE_BLOCK(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succeed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
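The cylinder-group search above runs in three phases: the preferred group, a quadratic rehash that probes groups at power-of-two strides, and finally a brute-force linear scan starting two past the original group. A standalone sketch of the resulting visit order; ncg and start are illustrative values, and printing stands in for the free-space test that the real code performs via the UFS_TEST_FREE_SPACE_CG macro:

#include <stdio.h>

int main(void)
{
	unsigned ncg = 16, start = 5, cgno, j;

	printf("preferred: %u\n", start);

	cgno = start;
	for (j = 1; j < ncg; j *= 2) {		/* quadratic rehash */
		cgno += j;
		if (cgno >= ncg)
			cgno -= ncg;
		printf("rehash:    %u\n", cgno);
	}

	cgno = (start + 1) % ncg;		/* brute force; offsets 0 and 1 were covered above */
	for (j = 2; j < ncg; j++) {
		cgno++;
		if (cgno >= ncg)
			cgno = 0;
		printf("linear:    %u\n", cgno);
	}
	return 0;
}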
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;

	/*
	 * Reserve quota before touching the bitmap and fragment counters,
	 * so a failure leaves the cylinder group consistent.
	 */
	if (DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}

	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
	return fragment;
}
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super(sb);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	DQUOT_FREE_BLOCK (inode, count);

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	mark_sb_dirty(sb);

	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return;
}
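The reassembly step above relies on the convention that a set bit in c_freeoff marks a free fragment, so a whole block is reclaimable exactly when all of its fragments' bits are set. A minimal sketch of that test, assuming an in-memory bitmap with the same set-means-free convention; the fpb value and the sample map are illustrative, not UFS on-disk data:

#include <stdio.h>

/* Return 1 if every fragment of block 'blkno' is marked free
 * (bit set) in 'map'; a hypothetical helper, not a UFS function. */
static int block_is_free(const unsigned char *map, unsigned blkno, unsigned fpb)
{
	unsigned i, base = blkno * fpb;

	for (i = 0; i < fpb; i++)
		if (!(map[(base + i) >> 3] & (1u << ((base + i) & 7))))
			return 0;	/* a fragment is still in use */
	return 1;
}

int main(void)
{
	unsigned char map[2] = { 0xff, 0x0f };	/* frags 0..11 free, 12..15 used */
	unsigned fpb = 8;			/* fragments per block */

	printf("block 0 free: %d\n", block_is_free(map, 0, fpb));	/* 1 */
	printf("block 1 free: %d\n", block_is_free(map, 1, fpb));	/* 0 */
	return 0;
}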
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	mark_sb_dirty(sb);
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
static int ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_buffer_head *dind_bh;
	u64 i, tmp, dindirect_block;
	void *dind;
	int retry = 0;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	dindirect_block = (DIRECT_BLOCK > offset)
		? ((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0;
	retry = 0;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		return 0;
	dind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
	if (tmp != ufs_data_ptr_to_cpu(sb, p)) {
		ubh_brelse (dind_bh);
		return 1;
	}
	if (!dind_bh) {
		ufs_data_ptr_clear(uspi, p);
		return 0;
	}

	for (i = dindirect_block ; i < uspi->s_apb ; i++) {
		dind = ubh_get_data_ptr(uspi, dind_bh, i);
		tmp = ufs_data_ptr_to_cpu(sb, dind);
		if (!tmp)
			continue;
		retry |= ufs_trunc_indirect (inode, offset + (i << uspi->s_apbshift), dind);
		ubh_mark_buffer_dirty(dind_bh);
	}

	for (i = 0; i < uspi->s_apb; i++)
		if (!ufs_is_data_ptr_zero(uspi,
					  ubh_get_data_ptr(uspi, dind_bh, i)))
			break;
	if (i >= uspi->s_apb) {
		tmp = ufs_data_ptr_to_cpu(sb, p);
		ufs_data_ptr_clear(uspi, p);

		ufs_free_blocks(inode, tmp, uspi->s_fpb);
		mark_inode_dirty(inode);
		ubh_bforget(dind_bh);
		dind_bh = NULL;
	}
	if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh))
		ubh_sync_block(dind_bh);
	ubh_brelse (dind_bh);

	UFSD("EXIT: ino %lu\n", inode->i_ino);

	return retry;
}
static int ufs_trunc_dindirect (struct inode *inode, unsigned offset, __fs32 *p)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_buffer_head * dind_bh;
	unsigned i, tmp, dindirect_block;
	__fs32 * dind;
	int retry = 0;

	UFSD("ENTER\n");

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	dindirect_block = (DIRECT_BLOCK > offset)
		? ((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0;
	retry = 0;

	tmp = fs32_to_cpu(sb, *p);
	if (!tmp)
		return 0;
	dind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
	if (tmp != fs32_to_cpu(sb, *p)) {
		ubh_brelse (dind_bh);
		return 1;
	}
	if (!dind_bh) {
		*p = 0;
		return 0;
	}

	for (i = dindirect_block ; i < uspi->s_apb ; i++) {
		dind = ubh_get_addr32 (dind_bh, i);
		tmp = fs32_to_cpu(sb, *dind);
		if (!tmp)
			continue;
		retry |= ufs_trunc_indirect (inode, offset + (i << uspi->s_apbshift), dind);
		ubh_mark_buffer_dirty(dind_bh);
	}

	for (i = 0; i < uspi->s_apb; i++)
		if (*ubh_get_addr32 (dind_bh, i))
			break;
	if (i >= uspi->s_apb) {
		tmp = fs32_to_cpu(sb, *p);
		*p = 0;

		ufs_free_blocks(inode, tmp, uspi->s_fpb);
		mark_inode_dirty(inode);
		ubh_bforget(dind_bh);
		dind_bh = NULL;
	}
	if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
		ubh_ll_rw_block(SWRITE, dind_bh);
		ubh_wait_on_buffer (dind_bh);
	}
	ubh_brelse (dind_bh);

	UFSD("EXIT\n");

	return retry;
}
/*
 * There are two policies for allocating an inode. If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ufs_new_inode(struct inode * dir, int mode)
{
	struct super_block * sb;
	struct ufs_sb_info * sbi;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	struct inode * inode;
	unsigned cg, bit, i, j, start;
	struct ufs_inode_info *ufsi;
	int err = -ENOSPC;

	UFSD("ENTER\n");

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);
	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ufsi = UFS_I(inode);
	sbi = UFS_SB(sb);
	uspi = sbi->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	lock_super (sb);

	/*
	 * Try to place the inode in its parent directory
	 */
	i = ufs_inotocg(dir->i_ino);
	if (sbi->fs_cs(i).cs_nifree) {
		cg = i;
		goto cg_found;
	}

	/*
	 * Use a quadratic hash to find a group with a free inode
	 */
	for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
		i += j;
		if (i >= uspi->s_ncg)
			i -= uspi->s_ncg;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}

	/*
	 * That failed: try linear search for a free inode
	 */
	i = ufs_inotocg(dir->i_ino) + 1;
	for (j = 2; j < uspi->s_ncg; j++) {
		i++;
		if (i >= uspi->s_ncg)
			i = 0;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}

	goto failed;

cg_found:
	ucpi = ufs_load_cylinder (sb, cg);
	if (!ucpi) {
		err = -EIO;
		goto failed;
	}
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");

	start = ucpi->c_irotor;
	bit = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, uspi->s_ipg, start);
	if (!(bit < uspi->s_ipg)) {
		bit = ubh_find_first_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, start);
		if (!(bit < start)) {
			ufs_error (sb, "ufs_new_inode",
				   "cylinder group %u corrupted - error in inode bitmap\n", cg);
			err = -EIO;
			goto failed;
		}
	}
	UFSD("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg);
	if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
		ubh_setbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
	else {
		ufs_panic (sb, "ufs_new_inode", "internal error");
		err = -EIO;
		goto failed;
	}

	if (uspi->fs_magic == UFS2_MAGIC) {
		u32 initediblk = fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_initediblk);

		if (bit + uspi->s_inopb > initediblk &&
		    initediblk < fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_niblk))
			ufs2_init_inodes_chunk(sb, ucpi, ucg);
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
	uspi->cs_total.cs_nifree--;
	fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);

	if (S_ISDIR(mode)) {
		fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
		uspi->cs_total.cs_ndir++;
		fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
	}
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	inode->i_ino = cg * uspi->s_ipg + bit;
	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;

	inode->i_blocks = 0;
	inode->i_generation = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	ufsi->i_flags = UFS_I(dir)->i_flags;
	ufsi->i_lastfrag = 0;
	ufsi->i_shadow = 0;
	ufsi->i_osync = 0;
	ufsi->i_oeftflag = 0;
	ufsi->i_dir_start_lookup = 0;
	memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));

	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	if (uspi->fs_magic == UFS2_MAGIC) {
		struct buffer_head *bh;
		struct ufs2_inode *ufs2_inode;

		/*
		 * Set up the birth date here: there is no sense in holding it
		 * in struct ufs_inode_info only to lose the 64-bit value.
		 */
		bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
		if (!bh) {
			ufs_warning(sb, "ufs_new_inode",
				    "unable to read inode %lu\n",
				    inode->i_ino);
			err = -EIO;
			goto fail_remove_inode;
		}
		lock_buffer(bh);
		ufs2_inode = (struct ufs2_inode *)bh->b_data;
		ufs2_inode += ufs_inotofsbo(inode->i_ino);
		ufs2_inode->ui_birthtime = cpu_to_fs64(sb, CURRENT_TIME.tv_sec);
		ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, CURRENT_TIME.tv_nsec);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (sb->s_flags & MS_SYNCHRONOUS)
			sync_dirty_buffer(bh);
		brelse(bh);
	}

	unlock_super (sb);

	if (DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		err = -EDQUOT;
		goto fail_without_unlock;
	}

	UFSD("allocating inode %lu\n", inode->i_ino);
	UFSD("EXIT\n");
	return inode;

fail_remove_inode:
	unlock_super(sb);
fail_without_unlock:
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	iput(inode);
	UFSD("EXIT (FAILED): err %d\n", err);
	return ERR_PTR(err);

failed:
	unlock_super (sb);
	make_bad_inode(inode);
	iput (inode);
	UFSD("EXIT (FAILED): err %d\n", err);
	return ERR_PTR(err);
}
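Both versions of ufs_new_inode apply the BSD setgid-directory rule seen above: a new inode inherits the parent's gid when the parent directory carries S_ISGID, and a new subdirectory also inherits the S_ISGID bit itself; otherwise the creator's fsgid is used. A userspace sketch of just that decision; inherit_ids is a hypothetical helper, and the fsgid argument stands in for current->fsgid:

#include <stdio.h>
#include <sys/stat.h>

/* Decide the new inode's mode and gid from the parent directory. */
static void inherit_ids(mode_t dir_mode, int dir_gid, int fsgid,
			mode_t *mode, int *gid)
{
	if (dir_mode & S_ISGID) {
		*gid = dir_gid;			/* group comes from the directory */
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;	/* subdirectories stay setgid */
	} else {
		*gid = fsgid;			/* otherwise the creator's gid */
	}
}

int main(void)
{
	mode_t mode = S_IFDIR | 0755;
	int gid;

	inherit_ids(S_IFDIR | S_ISGID | 0775, 100, 1000, &mode, &gid);
	printf("gid %d, setgid %d\n", gid, !!(mode & S_ISGID));	/* gid 100, setgid 1 */
	return 0;
}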
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
	unsigned swab;

	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	swab = sb->u.ufs_sb.s_swab;
	usb1 = ubh_get_usb_first(USPI_UBH);

	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super(sb);

	cgno = ufs_dtog(fragment);
	bit = ufs_dtogd(fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic (ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	DQUOT_FREE_BLOCK (sb, inode, count);
	ADD_SWAB32(ucg->cg_cs.cs_nffree, count);
	ADD_SWAB32(usb1->fs_cstotal.cs_nffree, count);
	ADD_SWAB32(sb->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
		SUB_SWAB32(ucg->cg_cs.cs_nffree, uspi->s_fpb);
		SUB_SWAB32(usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
		SUB_SWAB32(sb->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		INC_SWAB32(ucg->cg_cs.cs_nbfree);
		INC_SWAB32(usb1->fs_cstotal.cs_nbfree);
		INC_SWAB32(sb->fs_cs(cgno).cs_nbfree);
		cylno = ufs_cbtocylno (bbase);
		INC_SWAB16(ubh_cg_blks (ucpi, cylno, ufs_cbtorpos(bbase)));
		INC_SWAB32(ubh_cg_blktot (ucpi, cylno));
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD(("EXIT\n"))
	return;

failed:
	unlock_super (sb);
	UFSD(("EXIT (FAILED)\n"))
	return;
}
/*
 * There are two policies for allocating an inode. If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ufs_new_inode(struct inode * dir, int mode)
{
	struct super_block * sb;
	struct ufs_sb_info * sbi;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	struct inode * inode;
	unsigned cg, bit, i, j, start;
	struct ufs_inode_info *ufsi;

	UFSD(("ENTER\n"))

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);
	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ufsi = UFS_I(inode);
	sbi = UFS_SB(sb);
	uspi = sbi->s_uspi;
	usb1 = ubh_get_usb_first(USPI_UBH);

	lock_super (sb);

	/*
	 * Try to place the inode in its parent directory
	 */
	i = ufs_inotocg(dir->i_ino);
	if (sbi->fs_cs(i).cs_nifree) {
		cg = i;
		goto cg_found;
	}

	/*
	 * Use a quadratic hash to find a group with a free inode
	 */
	for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
		i += j;
		if (i >= uspi->s_ncg)
			i -= uspi->s_ncg;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}

	/*
	 * That failed: try linear search for a free inode
	 */
	i = ufs_inotocg(dir->i_ino) + 1;
	for (j = 2; j < uspi->s_ncg; j++) {
		i++;
		if (i >= uspi->s_ncg)
			i = 0;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}

	goto failed;

cg_found:
	ucpi = ufs_load_cylinder (sb, cg);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg(UCPI_UBH);
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");

	start = ucpi->c_irotor;
	bit = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_iusedoff, uspi->s_ipg, start);
	if (!(bit < uspi->s_ipg)) {
		bit = ubh_find_first_zero_bit (UCPI_UBH, ucpi->c_iusedoff, start);
		if (!(bit < start)) {
			ufs_error (sb, "ufs_new_inode",
				   "cylinder group %u corrupted - error in inode bitmap\n", cg);
			goto failed;
		}
	}
	UFSD(("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg))
	if (ubh_isclr (UCPI_UBH, ucpi->c_iusedoff, bit))
		ubh_setbit (UCPI_UBH, ucpi->c_iusedoff, bit);
	else {
		ufs_panic (sb, "ufs_new_inode", "internal error");
		goto failed;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
	fs32_sub(sb, &usb1->fs_cstotal.cs_nifree, 1);
	fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
	if (S_ISDIR(mode)) {
		fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
		fs32_add(sb, &usb1->fs_cstotal.cs_ndir, 1);
		fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_wait_on_buffer (UCPI_UBH);
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;

	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;

	inode->i_ino = cg * uspi->s_ipg + bit;
	inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	ufsi->i_flags = UFS_I(dir)->i_flags;
	ufsi->i_lastfrag = 0;
	ufsi->i_gen = 0;
	ufsi->i_shadow = 0;
	ufsi->i_osync = 0;
	ufsi->i_oeftflag = 0;
	memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));

	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	unlock_super (sb);

	if (DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}

	UFSD(("allocating inode %lu\n", inode->i_ino))
	UFSD(("EXIT\n"))
	return inode;

failed:
	unlock_super (sb);
	make_bad_inode(inode);
	iput (inode);
	UFSD(("EXIT (FAILED)\n"))
	return ERR_PTR(-ENOSPC);
}
static int ufs_trunc_indirect(struct inode *inode, u64 offset, void *p)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_buffer_head * ind_ubh;
	void *ind;
	u64 tmp, indirect_block, i, frag_to_free;
	unsigned free_count;
	int retry;

	UFSD("ENTER: ino %lu, offset %llu, p: %p\n",
	     inode->i_ino, (unsigned long long)offset, p);

	BUG_ON(!p);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag_to_free = 0;
	free_count = 0;
	retry = 0;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		return 0;
	ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize);
	if (tmp != ufs_data_ptr_to_cpu(sb, p)) {
		ubh_brelse (ind_ubh);
		return 1;
	}
	if (!ind_ubh) {
		ufs_data_ptr_clear(uspi, p);
		return 0;
	}

	indirect_block = (DIRECT_BLOCK > offset) ? (DIRECT_BLOCK - offset) : 0;
	for (i = indirect_block; i < uspi->s_apb; i++) {
		ind = ubh_get_data_ptr(uspi, ind_ubh, i);
		tmp = ufs_data_ptr_to_cpu(sb, ind);
		if (!tmp)
			continue;

		ufs_data_ptr_clear(uspi, ind);
		ubh_mark_buffer_dirty(ind_ubh);
		if (free_count == 0) {
			frag_to_free = tmp;
			free_count = uspi->s_fpb;
		} else if (free_count > 0 && frag_to_free == tmp - free_count)
			free_count += uspi->s_fpb;
		else {
			ufs_free_blocks (inode, frag_to_free, free_count);
			frag_to_free = tmp;
			free_count = uspi->s_fpb;
		}

		mark_inode_dirty(inode);
	}

	if (free_count > 0) {
		ufs_free_blocks (inode, frag_to_free, free_count);
	}
	for (i = 0; i < uspi->s_apb; i++)
		if (!ufs_is_data_ptr_zero(uspi,
					  ubh_get_data_ptr(uspi, ind_ubh, i)))
			break;
	if (i >= uspi->s_apb) {
		tmp = ufs_data_ptr_to_cpu(sb, p);
		ufs_data_ptr_clear(uspi, p);

		ufs_free_blocks (inode, tmp, uspi->s_fpb);
		mark_inode_dirty(inode);
		ubh_bforget(ind_ubh);
		ind_ubh = NULL;
	}
	if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh))
		ubh_sync_block(ind_ubh);
	ubh_brelse (ind_ubh);

	UFSD("EXIT: ino %lu\n", inode->i_ino);

	return retry;
}
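The frag_to_free/free_count pair above batches frees: as long as each freed block is contiguous with the accumulated run, the run is extended by s_fpb fragments, and ufs_free_blocks is called only when a discontinuity appears (plus once more at the end). The same batching in isolation, with a print stub in place of ufs_free_blocks and an illustrative block list:

#include <stdio.h>

static void free_run(unsigned long long frag, unsigned count)
{
	printf("free %u fragments starting at %llu\n", count, frag);
}

/* Coalesce consecutive block frees the way ufs_trunc_indirect does:
 * extend the current run while blocks are adjacent, flush otherwise. */
int main(void)
{
	unsigned long long blocks[] = { 100, 108, 116, 300, 308, 0 };
	unsigned fpb = 8;			/* fragments per block (illustrative) */
	unsigned long long frag_to_free = 0, tmp;
	unsigned free_count = 0;
	int i;

	for (i = 0; (tmp = blocks[i]) != 0; i++) {
		if (free_count == 0) {
			frag_to_free = tmp;
			free_count = fpb;
		} else if (frag_to_free == tmp - free_count) {
			free_count += fpb;	/* adjacent: extend the run */
		} else {
			free_run(frag_to_free, free_count);
			frag_to_free = tmp;
			free_count = fpb;
		}
	}
	if (free_count > 0)
		free_run(frag_to_free, free_count);	/* flush the tail */
	return 0;
}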
static int ufs_trunc_indirect (struct inode * inode, unsigned offset, __fs32 *p)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_buffer_head * ind_ubh;
	__fs32 * ind;
	unsigned indirect_block, i, tmp;
	unsigned frag_to_free, free_count;
	int retry;

	UFSD("ENTER\n");

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag_to_free = 0;
	free_count = 0;
	retry = 0;

	tmp = fs32_to_cpu(sb, *p);
	if (!tmp)
		return 0;
	ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize);
	if (tmp != fs32_to_cpu(sb, *p)) {
		ubh_brelse (ind_ubh);
		return 1;
	}
	if (!ind_ubh) {
		*p = 0;
		return 0;
	}

	indirect_block = (DIRECT_BLOCK > offset) ? (DIRECT_BLOCK - offset) : 0;
	for (i = indirect_block; i < uspi->s_apb; i++) {
		ind = ubh_get_addr32 (ind_ubh, i);
		tmp = fs32_to_cpu(sb, *ind);
		if (!tmp)
			continue;

		*ind = 0;
		ubh_mark_buffer_dirty(ind_ubh);
		if (free_count == 0) {
			frag_to_free = tmp;
			free_count = uspi->s_fpb;
		} else if (free_count > 0 && frag_to_free == tmp - free_count)
			free_count += uspi->s_fpb;
		else {
			ufs_free_blocks (inode, frag_to_free, free_count);
			frag_to_free = tmp;
			free_count = uspi->s_fpb;
		}

		mark_inode_dirty(inode);
	}

	if (free_count > 0) {
		ufs_free_blocks (inode, frag_to_free, free_count);
	}
	for (i = 0; i < uspi->s_apb; i++)
		if (*ubh_get_addr32(ind_ubh,i))
			break;
	if (i >= uspi->s_apb) {
		tmp = fs32_to_cpu(sb, *p);
		*p = 0;

		ufs_free_blocks (inode, tmp, uspi->s_fpb);
		mark_inode_dirty(inode);
		ubh_bforget(ind_ubh);
		ind_ubh = NULL;
	}
	if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
		ubh_ll_rw_block(SWRITE, ind_ubh);
		ubh_wait_on_buffer (ind_ubh);
	}
	ubh_brelse (ind_ubh);

	UFSD("EXIT\n");

	return retry;
}