Example #1
/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: allocate a block near this one
 * @count: number of contiguous fragments wanted
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
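	/*
	 * mask_arr[i] selects a window of i + 2 bits and want_arr[i] sets only
	 * the middle i of them, so the comparison matches a run of exactly
	 * i free fragments bounded by allocated fragments (or the block ends).
	 */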
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_cylinder_group *ucg;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	usb1 = ubh_get_usb_first (uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
Example #2
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
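	/*
	 * Round the goal down to its block and convert it to a fragment
	 * offset within this cylinder group.
	 */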
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);
	
	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}
	
norot:	
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}
	
	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
Example #3
/*
 * Remove a cylinder group from the cache; this does not release the memory
 * allocated for the cylinder group (that is done only in ufs_put_super).
 */
void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi; 
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i;

	UFSD("ENTER, bitmap_nr %u\n", bitmap_nr);

	uspi = sbi->s_uspi;
	if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
		UFSD("EXIT\n");
		return;
	}
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
		ufs_panic (sb, "ufs_put_cylinder", "internal error");
		return;
	}
	/*
	 * The rotors are not critical data, so we only write them to disk
	 * when we are finished working with the cylinder group
	 */
	ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
	ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
	ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	for (i = 1; i < UCPI_UBH(ucpi)->count; i++) {
		brelse (UCPI_UBH(ucpi)->bh[i]);
	}

	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	UFSD("EXIT\n");
}
Example #4
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;
	
	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;
	
	/*
	 * 1. search the preferred cylinder group
	 */
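	/* (UFS_TEST_FREE_SPACE_CG jumps to cg_found when group cgno has a
	 * free block or a free-fragment run of at least 'count' fragments) */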
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg) 
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at j = 2 (offset 0 was checked in step 1, offset 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}
	
	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) 
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

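	/*
	 * Find the smallest free-fragment run of at least 'count' fragments
	 * recorded in the per-size counters cg_frsum.
	 */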
	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;
	
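	/*
	 * No suitable fragment run: allocate a whole block, keep the first
	 * 'count' fragments and return the remainder to the free map.
	 */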
	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		DQUOT_FREE_BLOCK(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if(DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
	
	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
Example #5
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;
	
	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	count = newcount - oldcount;
	
	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
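	/*
	 * Find where the free run following the old fragments ends
	 * (it spans offsets oldcount .. i - 1 from fragno).
	 */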
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
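	/*
	 * That free run of 'fragsize' fragments shrinks by 'count': drop its
	 * cg_frsum entry and, if anything is left, count the shorter run.
	 */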
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
	if(DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
	
	return fragment;
}
Example #6
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	
	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);
	
	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");
	
	lock_super(sb);
	
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}
		
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
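	/*
	 * Withdraw this block's current fragment counts from cg_frsum;
	 * they are added back below once the freed bits are set.
	 */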
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else 
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}
	
	DQUOT_FREE_BLOCK (inode, count);

	
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the freed fragments into a whole block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
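		/*
		 * UFS1 also keeps per-cylinder rotational-position counters;
		 * UFS2 dropped them.
		 */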
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}
	
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	mark_sb_dirty(sb);
	
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return;
}
Example #7
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);
	
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	lock_super(sb);
	
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
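	/*
	 * The range may extend past this cylinder group; free the part
	 * that overflows in another pass (see do_more).
	 */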
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	mark_sb_dirty(sb);
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
Example #8
File: ialloc.c Project: 274914765/C
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ufs_free_inode (struct inode * inode)
{
    struct super_block * sb;
    struct ufs_sb_private_info * uspi;
    struct ufs_super_block_first * usb1;
    struct ufs_cg_private_info * ucpi;
    struct ufs_cylinder_group * ucg;
    int is_directory;
    unsigned ino, cg, bit;
    
    UFSD("ENTER, ino %lu\n", inode->i_ino);

    sb = inode->i_sb;
    uspi = UFS_SB(sb)->s_uspi;
    usb1 = ubh_get_usb_first(uspi);
    
    ino = inode->i_ino;

    lock_super (sb);

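    /* Inode numbers 0 and 1 are reserved; only 1 < ino < s_ncg * s_ipg is valid. */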
    if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
        ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
        unlock_super (sb);
        return;
    }
    
    cg = ufs_inotocg (ino);
    bit = ufs_inotocgoff (ino);
    ucpi = ufs_load_cylinder (sb, cg);
    if (!ucpi) {
        unlock_super (sb);
        return;
    }
    ucg = ubh_get_ucg(UCPI_UBH(ucpi));
    if (!ufs_cg_chkmagic(sb, ucg))
        ufs_panic (sb, "ufs_free_inode", "internal error, bad cg magic number");

    ucg->cg_time = cpu_to_fs32(sb, get_seconds());

    is_directory = S_ISDIR(inode->i_mode);

    DQUOT_FREE_INODE(inode);
    DQUOT_DROP(inode);

    clear_inode (inode);

    if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
        ufs_error(sb, "ufs_free_inode", "bit already cleared for inode %u", ino);
    else {
        ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
        if (ino < ucpi->c_irotor)
            ucpi->c_irotor = ino;
        fs32_add(sb, &ucg->cg_cs.cs_nifree, 1);
        uspi->cs_total.cs_nifree++;
        fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1);

        if (is_directory) {
            fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1);
            uspi->cs_total.cs_ndir--;
            fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1);
        }
    }

    ubh_mark_buffer_dirty (USPI_UBH(uspi));
    ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
    if (sb->s_flags & MS_SYNCHRONOUS) {
        ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
        ubh_wait_on_buffer (UCPI_UBH(ucpi));
    }
    
    sb->s_dirt = 1;
    unlock_super (sb);
    UFSD("EXIT\n");
}
Example #9
File: ialloc.c Project: 274914765/C
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then
 * among the groups with above-average free space, the one with the
 * fewest directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ufs_new_inode(struct inode * dir, int mode)
{
    struct super_block * sb;
    struct ufs_sb_info * sbi;
    struct ufs_sb_private_info * uspi;
    struct ufs_super_block_first * usb1;
    struct ufs_cg_private_info * ucpi;
    struct ufs_cylinder_group * ucg;
    struct inode * inode;
    unsigned cg, bit, i, j, start;
    struct ufs_inode_info *ufsi;
    int err = -ENOSPC;

    UFSD("ENTER\n");
    
    /* Cannot create files in a deleted directory */
    if (!dir || !dir->i_nlink)
        return ERR_PTR(-EPERM);
    sb = dir->i_sb;
    inode = new_inode(sb);
    if (!inode)
        return ERR_PTR(-ENOMEM);
    ufsi = UFS_I(inode);
    sbi = UFS_SB(sb);
    uspi = sbi->s_uspi;
    usb1 = ubh_get_usb_first(uspi);

    lock_super (sb);

    /*
     * Try to place the inode in its parent directory
     */
    i = ufs_inotocg(dir->i_ino);
    if (sbi->fs_cs(i).cs_nifree) {
        cg = i;
        goto cg_found;
    }

    /*
     * Use a quadratic hash to find a group with a free inode
     */
    for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
        i += j;
        if (i >= uspi->s_ncg)
            i -= uspi->s_ncg;
        if (sbi->fs_cs(i).cs_nifree) {
            cg = i;
            goto cg_found;
        }
    }

    /*
     * That failed: try linear search for a free inode
     */
    i = ufs_inotocg(dir->i_ino) + 1;
    for (j = 2; j < uspi->s_ncg; j++) {
        i++;
        if (i >= uspi->s_ncg)
            i = 0;
        if (sbi->fs_cs(i).cs_nifree) {
            cg = i;
            goto cg_found;
        }
    }

    goto failed;

cg_found:
    ucpi = ufs_load_cylinder (sb, cg);
    if (!ucpi) {
        err = -EIO;
        goto failed;
    }
    ucg = ubh_get_ucg(UCPI_UBH(ucpi));
    if (!ufs_cg_chkmagic(sb, ucg)) 
        ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");

    start = ucpi->c_irotor;
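    /*
     * Search from the allocation rotor to the end of the group; if that
     * fails, wrap around and search the bits before the rotor.
     */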
    bit = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, uspi->s_ipg, start);
    if (!(bit < uspi->s_ipg)) {
        bit = ubh_find_first_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, start);
        if (!(bit < start)) {
            ufs_error (sb, "ufs_new_inode",
                "cylinder group %u corrupted - error in inode bitmap\n", cg);
            err = -EIO;
            goto failed;
        }
    }
    UFSD("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg);
    if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
    else {
        ufs_panic (sb, "ufs_new_inode", "internal error");
        err = -EIO;
        goto failed;
    }

    if (uspi->fs_magic == UFS2_MAGIC) {
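        /*
         * UFS2 initializes inode blocks lazily; extend the initialized
         * area if this inode falls beyond it.
         */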
        u32 initediblk = fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_initediblk);

        if (bit + uspi->s_inopb > initediblk &&
            initediblk < fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_niblk))
            ufs2_init_inodes_chunk(sb, ucpi, ucg);
    }

    fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
    uspi->cs_total.cs_nifree--;
    fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
    
    if (S_ISDIR(mode)) {
        fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
        uspi->cs_total.cs_ndir++;
        fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
    }
    ubh_mark_buffer_dirty (USPI_UBH(uspi));
    ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
    if (sb->s_flags & MS_SYNCHRONOUS) {
        ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
        ubh_wait_on_buffer (UCPI_UBH(ucpi));
    }
    sb->s_dirt = 1;

    inode->i_ino = cg * uspi->s_ipg + bit;
    inode->i_mode = mode;
    inode->i_uid = current->fsuid;
    if (dir->i_mode & S_ISGID) {
        inode->i_gid = dir->i_gid;
        if (S_ISDIR(mode))
            inode->i_mode |= S_ISGID;
    } else
        inode->i_gid = current->fsgid;

    inode->i_blocks = 0;
    inode->i_generation = 0;
    inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
    ufsi->i_flags = UFS_I(dir)->i_flags;
    ufsi->i_lastfrag = 0;
    ufsi->i_shadow = 0;
    ufsi->i_osync = 0;
    ufsi->i_oeftflag = 0;
    ufsi->i_dir_start_lookup = 0;
    memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));
    insert_inode_hash(inode);
    mark_inode_dirty(inode);

    if (uspi->fs_magic == UFS2_MAGIC) {
        struct buffer_head *bh;
        struct ufs2_inode *ufs2_inode;

        /*
         * set up the birth time here: there is no point in keeping it in
         * struct ufs_inode_info only to lose the 64-bit value
         */
        bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
        if (!bh) {
            ufs_warning(sb, "ufs_new_inode",
                    "unable to read inode %lu\n",
                    inode->i_ino);
            err = -EIO;
            goto fail_remove_inode;
        }
        lock_buffer(bh);
        ufs2_inode = (struct ufs2_inode *)bh->b_data;
        ufs2_inode += ufs_inotofsbo(inode->i_ino);
        ufs2_inode->ui_birthtime = cpu_to_fs64(sb, CURRENT_TIME.tv_sec);
        ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, CURRENT_TIME.tv_nsec);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);
        if (sb->s_flags & MS_SYNCHRONOUS)
            sync_dirty_buffer(bh);
        brelse(bh);
    }

    unlock_super (sb);

    if (DQUOT_ALLOC_INODE(inode)) {
        DQUOT_DROP(inode);
        err = -EDQUOT;
        goto fail_without_unlock;
    }

    UFSD("allocating inode %lu\n", inode->i_ino);
    UFSD("EXIT\n");
    return inode;

fail_remove_inode:
    unlock_super(sb);
fail_without_unlock:
    inode->i_flags |= S_NOQUOTA;
    inode->i_nlink = 0;
    iput(inode);
    UFSD("EXIT (FAILED): err %d\n", err);
    return ERR_PTR(err);
failed:
    unlock_super (sb);
    make_bad_inode(inode);
    iput (inode);
    UFSD("EXIT (FAILED): err %d\n", err);
    return ERR_PTR(err);
}
Example #10
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then
 * among the groups with above-average free space, the one with the
 * fewest directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ufs_new_inode(struct inode * dir, int mode)
{
	struct super_block * sb;
	struct ufs_sb_info * sbi;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	struct inode * inode;
	unsigned cg, bit, i, j, start;
	struct ufs_inode_info *ufsi;

	UFSD(("ENTER\n"))
	
	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);
	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ufsi = UFS_I(inode);
	sbi = UFS_SB(sb);
	uspi = sbi->s_uspi;
	usb1 = ubh_get_usb_first(USPI_UBH);

	lock_super (sb);

	/*
	 * Try to place the inode in its parent directory
	 */
	i = ufs_inotocg(dir->i_ino);
	if (sbi->fs_cs(i).cs_nifree) {
		cg = i;
		goto cg_found;
	}

	/*
	 * Use a quadratic hash to find a group with a free inode
	 */
	for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
		i += j;
		if (i >= uspi->s_ncg)
			i -= uspi->s_ncg;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}

	/*
	 * That failed: try linear search for a free inode
	 */
	i = ufs_inotocg(dir->i_ino) + 1;
	for (j = 2; j < uspi->s_ncg; j++) {
		i++;
		if (i >= uspi->s_ncg)
			i = 0;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}
	
	goto failed;

cg_found:
	ucpi = ufs_load_cylinder (sb, cg);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg(UCPI_UBH);
	if (!ufs_cg_chkmagic(sb, ucg)) 
		ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");

	start = ucpi->c_irotor;
	bit = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_iusedoff, uspi->s_ipg, start);
	if (!(bit < uspi->s_ipg)) {
		bit = ubh_find_first_zero_bit (UCPI_UBH, ucpi->c_iusedoff, start);
		if (!(bit < start)) {
			ufs_error (sb, "ufs_new_inode",
			    "cylinder group %u corrupted - error in inode bitmap\n", cg);
			goto failed;
		}
	}
	UFSD(("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg))
	if (ubh_isclr (UCPI_UBH, ucpi->c_iusedoff, bit))
		ubh_setbit (UCPI_UBH, ucpi->c_iusedoff, bit);
	else {
		ufs_panic (sb, "ufs_new_inode", "internal error");
		goto failed;
	}
	
	fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
	fs32_sub(sb, &usb1->fs_cstotal.cs_nifree, 1);
	fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
	
	if (S_ISDIR(mode)) {
		fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
		fs32_add(sb, &usb1->fs_cstotal.cs_ndir, 1);
		fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_wait_on_buffer (UCPI_UBH);
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;

	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;

	inode->i_ino = cg * uspi->s_ipg + bit;
	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	ufsi->i_flags = UFS_I(dir)->i_flags;
	ufsi->i_lastfrag = 0;
	ufsi->i_gen = 0;
	ufsi->i_shadow = 0;
	ufsi->i_osync = 0;
	ufsi->i_oeftflag = 0;
	memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));

	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	unlock_super (sb);

	if (DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}

	UFSD(("allocating inode %lu\n", inode->i_ino))
	UFSD(("EXIT\n"))
	return inode;

failed:
	unlock_super (sb);
	make_bad_inode(inode);
	iput (inode);
	UFSD(("EXIT (FAILED)\n"))
	return ERR_PTR(-ENOSPC);
}
Example #11
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
	unsigned swab;
	
	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	swab = sb->u.ufs_sb.s_swab;
	usb1 = ubh_get_usb_first(USPI_UBH);
	
	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
	
	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");
	
	lock_super(sb);
	
	cgno = ufs_dtog(fragment);
	bit = ufs_dtogd(fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}
		
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic (ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
		else ufs_error (sb, "ufs_free_fragments",
			"bit already cleared for fragment %u", i);
	}
	
	DQUOT_FREE_BLOCK (sb, inode, count);
	ADD_SWAB32(ucg->cg_cs.cs_nffree, count);
	ADD_SWAB32(usb1->fs_cstotal.cs_nffree, count);
	ADD_SWAB32(sb->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the freed fragments into a whole block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
		SUB_SWAB32(ucg->cg_cs.cs_nffree, uspi->s_fpb);
		SUB_SWAB32(usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
		SUB_SWAB32(sb->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		INC_SWAB32(ucg->cg_cs.cs_nbfree);
		INC_SWAB32(usb1->fs_cstotal.cs_nbfree);
		INC_SWAB32(sb->fs_cs(cgno).cs_nbfree);
		cylno = ufs_cbtocylno (bbase);
		INC_SWAB16(ubh_cg_blks (ucpi, cylno, ufs_cbtorpos(bbase)));
		INC_SWAB32(ubh_cg_blktot (ucpi, cylno));
	}
	
	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;
	
	unlock_super (sb);
	UFSD(("EXIT\n"))
	return;

failed:
	unlock_super (sb);
	UFSD(("EXIT (FAILED)\n"))
	return;
}
Example #12
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
	unsigned swab;
	
	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	swab = sb->u.ufs_sb.s_swab;
	usb1 = ubh_get_usb_first(USPI_UBH);

	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
	
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			"fragment %u, count %u\n", fragment, count);
		goto failed;
	}

	lock_super(sb);
	
do_more:
	overflow = 0;
	cgno = ufs_dtog (fragment);
	bit = ufs_dtogd (fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic (ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
		if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(sb, inode, uspi->s_fpb);
		INC_SWAB32(ucg->cg_cs.cs_nbfree);
		INC_SWAB32(usb1->fs_cstotal.cs_nbfree);
		INC_SWAB32(sb->fs_cs(cgno).cs_nbfree);
		cylno = ufs_cbtocylno(i);
		INC_SWAB16(ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)));
		INC_SWAB32(ubh_cg_blktot(ucpi, cylno));
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD(("EXIT\n"))
	return;

failed:
	unlock_super (sb);
	UFSD(("EXIT (FAILED)\n"))
	return;
}