Example #1
0
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;
	
	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;
	
	/*
	 * 1. searching on preferred cylinder group
	 */
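	/*
	 * UFS_TEST_FREE_SPACE_CG is a helper macro defined earlier in
	 * balloc.c (not shown here); it jumps to cg_found when cylinder
	 * group 'cgno' has enough free fragments to satisfy the request.
	 */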
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg) 
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at j = 2 (offset 0 was checked in step 1, offset 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}
	
	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) 
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

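	/*
	 * cg_frsum[k] counts free runs of exactly k fragments; find the
	 * smallest usable run size >= count available in this group.
	 */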
	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;
	
	if (allocsize == uspi->s_fpb) {
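		/*
		 * No free run of a suitable size exists: allocate a whole
		 * block and give the unused tail fragments back below.
		 */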
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		DQUOT_FREE_BLOCK(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if(DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
	
	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
Example #2
0
/*
 * Read on-disk structures associated with cylinder groups
 */
static int ufs_read_cylinder_structures(struct super_block *sb)
{
	struct ufs_sb_info *sbi = UFS_SB(sb);
	struct ufs_sb_private_info *uspi = sbi->s_uspi;
	struct ufs_buffer_head * ubh;
	unsigned char * base, * space;
	unsigned size, blks, i;

	UFSD("ENTER\n");

	/*
	 * Read cs structures from (usually) first data block
	 * on the device. 
	 */
	size = uspi->s_cssize;
	blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
	base = space = kmalloc(size, GFP_NOFS);
	if (!base)
		goto failed; 
	sbi->s_csp = (struct ufs_csum *)space;
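	/*
	 * Read the summary area one filesystem block at a time; the last
	 * read may cover only the remaining fragments.
	 */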
	for (i = 0; i < blks; i += uspi->s_fpb) {
		size = uspi->s_bsize;
		if (i + uspi->s_fpb > blks)
			size = (blks - i) * uspi->s_fsize;

		ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
		
		if (!ubh)
			goto failed;

		ubh_ubhcpymem (space, ubh, size);

		space += size;
		ubh_brelse (ubh);
		ubh = NULL;
	}

	/*
	 * Read cylinder group (we read only first fragment from block
	 * at this time) and prepare internal data structures for cg caching.
	 */
	if (!(sbi->s_ucg = kmalloc (sizeof(struct buffer_head *) * uspi->s_ncg, GFP_NOFS)))
		goto failed;
	for (i = 0; i < uspi->s_ncg; i++) 
		sbi->s_ucg[i] = NULL;
	for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) {
		sbi->s_ucpi[i] = NULL;
		sbi->s_cgno[i] = UFS_CGNO_EMPTY;
	}
	for (i = 0; i < uspi->s_ncg; i++) {
		UFSD("read cg %u\n", i);
		if (!(sbi->s_ucg[i] = sb_bread(sb, ufs_cgcmin(i))))
			goto failed;
		if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data))
			goto failed;

		ufs_print_cylinder_stuff(sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data);
	}
	for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) {
		if (!(sbi->s_ucpi[i] = kmalloc (sizeof(struct ufs_cg_private_info), GFP_NOFS)))
			goto failed;
		sbi->s_cgno[i] = UFS_CGNO_EMPTY;
	}
	sbi->s_cg_loaded = 0;
	UFSD("EXIT\n");
	return 1;

failed:
	kfree (base);
	if (sbi->s_ucg) {
		for (i = 0; i < uspi->s_ncg; i++)
			if (sbi->s_ucg[i])
				brelse (sbi->s_ucg[i]);
		kfree (sbi->s_ucg);
		for (i = 0; i < UFS_MAX_GROUP_LOADED; i++)
			kfree (sbi->s_ucpi[i]);
	}
	UFSD("EXIT (FAILED)\n");
	return 0;
}
Example #3
0
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	
	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);
	
	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");
	
	lock_super(sb);
	
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}
		
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
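	/*
	 * Drop this block's current free-run counts from cg_frsum; they are
	 * re-added below once the freed bits have been set.
	 */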
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else 
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}
	
	DQUOT_FREE_BLOCK (inode, count);

	
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}
	
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	mark_sb_dirty(sb);
	
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return;
}
Example #4
0
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;
	
	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	count = newcount - oldcount;
	
	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
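	/*
	 * Find the end of the free run that follows the old allocation; its
	 * current size class is i - oldcount, which is re-accounted below.
	 */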
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
	if(DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
	
	return fragment;
}
Example #5
0
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ufs_free_inode (struct inode * inode)
{
    struct super_block * sb;
    struct ufs_sb_private_info * uspi;
    struct ufs_super_block_first * usb1;
    struct ufs_cg_private_info * ucpi;
    struct ufs_cylinder_group * ucg;
    int is_directory;
    unsigned ino, cg, bit;
    
    UFSD("ENTER, ino %lu\n", inode->i_ino);

    sb = inode->i_sb;
    uspi = UFS_SB(sb)->s_uspi;
    usb1 = ubh_get_usb_first(uspi);
    
    ino = inode->i_ino;

    lock_super (sb);

    if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
        ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
        unlock_super (sb);
        return;
    }
    
    cg = ufs_inotocg (ino);
    bit = ufs_inotocgoff (ino);
    ucpi = ufs_load_cylinder (sb, cg);
    if (!ucpi) {
        unlock_super (sb);
        return;
    }
    ucg = ubh_get_ucg(UCPI_UBH(ucpi));
    if (!ufs_cg_chkmagic(sb, ucg))
        ufs_panic (sb, "ufs_free_fragments", "internal error, bad cg magic number");

    ucg->cg_time = cpu_to_fs32(sb, get_seconds());

    is_directory = S_ISDIR(inode->i_mode);

    DQUOT_FREE_INODE(inode);
    DQUOT_DROP(inode);

    clear_inode (inode);

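    /*
     * The in-core inode is gone; now mark it free in the on-disk inode
     * bitmap and update the free inode counts.
     */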
    if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
        ufs_error(sb, "ufs_free_inode", "bit already cleared for inode %u", ino);
    else {
        ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
        if (ino < ucpi->c_irotor)
            ucpi->c_irotor = ino;
        fs32_add(sb, &ucg->cg_cs.cs_nifree, 1);
        uspi->cs_total.cs_nifree++;
        fs32_add(sb, &UFS_SB(sb)->fs_cs(cg).cs_nifree, 1);

        if (is_directory) {
            fs32_sub(sb, &ucg->cg_cs.cs_ndir, 1);
            uspi->cs_total.cs_ndir--;
            fs32_sub(sb, &UFS_SB(sb)->fs_cs(cg).cs_ndir, 1);
        }
    }

    ubh_mark_buffer_dirty (USPI_UBH(uspi));
    ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
    if (sb->s_flags & MS_SYNCHRONOUS) {
        ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
        ubh_wait_on_buffer (UCPI_UBH(ucpi));
    }
    
    sb->s_dirt = 1;
    unlock_super (sb);
    UFSD("EXIT\n");
}
Example #6
0
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;
	
	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);
	
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	lock_super(sb);
	
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
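	/*
	 * If the range extends past this cylinder group, free only the part
	 * inside this group now and loop back (do_more) for the rest.
	 */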
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	mark_sb_dirty(sb);
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
Example #7
0
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ufs_new_inode(struct inode * dir, int mode)
{
    struct super_block * sb;
    struct ufs_sb_info * sbi;
    struct ufs_sb_private_info * uspi;
    struct ufs_super_block_first * usb1;
    struct ufs_cg_private_info * ucpi;
    struct ufs_cylinder_group * ucg;
    struct inode * inode;
    unsigned cg, bit, i, j, start;
    struct ufs_inode_info *ufsi;
    int err = -ENOSPC;

    UFSD("ENTER\n");
    
    /* Cannot create files in a deleted directory */
    if (!dir || !dir->i_nlink)
        return ERR_PTR(-EPERM);
    sb = dir->i_sb;
    inode = new_inode(sb);
    if (!inode)
        return ERR_PTR(-ENOMEM);
    ufsi = UFS_I(inode);
    sbi = UFS_SB(sb);
    uspi = sbi->s_uspi;
    usb1 = ubh_get_usb_first(uspi);

    lock_super (sb);

    /*
     * Try to place the inode in its parent directory
     */
    i = ufs_inotocg(dir->i_ino);
    if (sbi->fs_cs(i).cs_nifree) {
        cg = i;
        goto cg_found;
    }

    /*
     * Use a quadratic hash to find a group with a free inode
     */
    for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
        i += j;
        if (i >= uspi->s_ncg)
            i -= uspi->s_ncg;
        if (sbi->fs_cs(i).cs_nifree) {
            cg = i;
            goto cg_found;
        }
    }

    /*
     * That failed: try linear search for a free inode
     */
    i = ufs_inotocg(dir->i_ino) + 1;
    for (j = 2; j < uspi->s_ncg; j++) {
        i++;
        if (i >= uspi->s_ncg)
            i = 0;
        if (sbi->fs_cs(i).cs_nifree) {
            cg = i;
            goto cg_found;
        }
    }

    goto failed;

cg_found:
    ucpi = ufs_load_cylinder (sb, cg);
    if (!ucpi) {
        err = -EIO;
        goto failed;
    }
    ucg = ubh_get_ucg(UCPI_UBH(ucpi));
    if (!ufs_cg_chkmagic(sb, ucg)) 
        ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");

    start = ucpi->c_irotor;
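    /*
     * Search the inode bitmap from the per-cg rotor; if nothing is free up
     * to s_ipg, wrap around and search from the start of the bitmap.
     */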
    bit = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, uspi->s_ipg, start);
    if (!(bit < uspi->s_ipg)) {
        bit = ubh_find_first_zero_bit (UCPI_UBH(ucpi), ucpi->c_iusedoff, start);
        if (!(bit < start)) {
            ufs_error (sb, "ufs_new_inode",
                "cylinder group %u corrupted - error in inode bitmap\n", cg);
            err = -EIO;
            goto failed;
        }
    }
    UFSD("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg);
    if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit))
        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit);
    else {
        ufs_panic (sb, "ufs_new_inode", "internal error");
        err = -EIO;
        goto failed;
    }

    if (uspi->fs_magic == UFS2_MAGIC) {
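        /*
         * UFS2 initializes on-disk inode blocks lazily: if the new inode
         * lies beyond the initialized region, initialize the next chunk.
         */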
        u32 initediblk = fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_initediblk);

        if (bit + uspi->s_inopb > initediblk &&
            initediblk < fs32_to_cpu(sb, ucg->cg_u.cg_u2.cg_niblk))
            ufs2_init_inodes_chunk(sb, ucpi, ucg);
    }

    fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
    uspi->cs_total.cs_nifree--;
    fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
    
    if (S_ISDIR(mode)) {
        fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
        uspi->cs_total.cs_ndir++;
        fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
    }
    ubh_mark_buffer_dirty (USPI_UBH(uspi));
    ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
    if (sb->s_flags & MS_SYNCHRONOUS) {
        ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
        ubh_wait_on_buffer (UCPI_UBH(ucpi));
    }
    sb->s_dirt = 1;

    inode->i_ino = cg * uspi->s_ipg + bit;
    inode->i_mode = mode;
    inode->i_uid = current->fsuid;
    if (dir->i_mode & S_ISGID) {
        inode->i_gid = dir->i_gid;
        if (S_ISDIR(mode))
            inode->i_mode |= S_ISGID;
    } else
        inode->i_gid = current->fsgid;

    inode->i_blocks = 0;
    inode->i_generation = 0;
    inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
    ufsi->i_flags = UFS_I(dir)->i_flags;
    ufsi->i_lastfrag = 0;
    ufsi->i_shadow = 0;
    ufsi->i_osync = 0;
    ufsi->i_oeftflag = 0;
    ufsi->i_dir_start_lookup = 0;
    memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));
    insert_inode_hash(inode);
    mark_inode_dirty(inode);

    if (uspi->fs_magic == UFS2_MAGIC) {
        struct buffer_head *bh;
        struct ufs2_inode *ufs2_inode;

        /*
         * Set up the birth date here; there is no point in keeping it in
         * struct ufs_inode_info and losing the 64-bit value.
         */
        bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
        if (!bh) {
            ufs_warning(sb, "ufs_read_inode",
                    "unable to read inode %lu\n",
                    inode->i_ino);
            err = -EIO;
            goto fail_remove_inode;
        }
        lock_buffer(bh);
        ufs2_inode = (struct ufs2_inode *)bh->b_data;
        ufs2_inode += ufs_inotofsbo(inode->i_ino);
        ufs2_inode->ui_birthtime = cpu_to_fs64(sb, CURRENT_TIME.tv_sec);
        ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, CURRENT_TIME.tv_nsec);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);
        if (sb->s_flags & MS_SYNCHRONOUS)
            sync_dirty_buffer(bh);
        brelse(bh);
    }

    unlock_super (sb);

    if (DQUOT_ALLOC_INODE(inode)) {
        DQUOT_DROP(inode);
        err = -EDQUOT;
        goto fail_without_unlock;
    }

    UFSD("allocating inode %lu\n", inode->i_ino);
    UFSD("EXIT\n");
    return inode;

fail_remove_inode:
    unlock_super(sb);
fail_without_unlock:
    inode->i_flags |= S_NOQUOTA;
    inode->i_nlink = 0;
    iput(inode);
    UFSD("EXIT (FAILED): err %d\n", err);
    return ERR_PTR(err);
failed:
    unlock_super (sb);
    make_bad_inode(inode);
    iput (inode);
    UFSD("EXIT (FAILED): err %d\n", err);
    return ERR_PTR(err);
}
Example #8
0
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode * ufs_new_inode(struct inode * dir, int mode)
{
	struct super_block * sb;
	struct ufs_sb_info * sbi;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	struct inode * inode;
	unsigned cg, bit, i, j, start;
	struct ufs_inode_info *ufsi;

	UFSD(("ENTER\n"))
	
	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);
	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ufsi = UFS_I(inode);
	sbi = UFS_SB(sb);
	uspi = sbi->s_uspi;
	usb1 = ubh_get_usb_first(USPI_UBH);

	lock_super (sb);

	/*
	 * Try to place the inode in its parent directory
	 */
	i = ufs_inotocg(dir->i_ino);
	if (sbi->fs_cs(i).cs_nifree) {
		cg = i;
		goto cg_found;
	}

	/*
	 * Use a quadratic hash to find a group with a free inode
	 */
	for ( j = 1; j < uspi->s_ncg; j <<= 1 ) {
		i += j;
		if (i >= uspi->s_ncg)
			i -= uspi->s_ncg;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}

	/*
	 * That failed: try linear search for a free inode
	 */
	i = ufs_inotocg(dir->i_ino) + 1;
	for (j = 2; j < uspi->s_ncg; j++) {
		i++;
		if (i >= uspi->s_ncg)
			i = 0;
		if (sbi->fs_cs(i).cs_nifree) {
			cg = i;
			goto cg_found;
		}
	}
	
	goto failed;

cg_found:
	ucpi = ufs_load_cylinder (sb, cg);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg(UCPI_UBH);
	if (!ufs_cg_chkmagic(sb, ucg)) 
		ufs_panic (sb, "ufs_new_inode", "internal error, bad cg magic number");

	start = ucpi->c_irotor;
	bit = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_iusedoff, uspi->s_ipg, start);
	if (!(bit < uspi->s_ipg)) {
		bit = ubh_find_first_zero_bit (UCPI_UBH, ucpi->c_iusedoff, start);
		if (!(bit < start)) {
			ufs_error (sb, "ufs_new_inode",
			    "cylinder group %u corrupted - error in inode bitmap\n", cg);
			goto failed;
		}
	}
	UFSD(("start = %u, bit = %u, ipg = %u\n", start, bit, uspi->s_ipg))
	if (ubh_isclr (UCPI_UBH, ucpi->c_iusedoff, bit))
		ubh_setbit (UCPI_UBH, ucpi->c_iusedoff, bit);
	else {
		ufs_panic (sb, "ufs_new_inode", "internal error");
		goto failed;
	}
	
	fs32_sub(sb, &ucg->cg_cs.cs_nifree, 1);
	fs32_sub(sb, &usb1->fs_cstotal.cs_nifree, 1);
	fs32_sub(sb, &sbi->fs_cs(cg).cs_nifree, 1);
	
	if (S_ISDIR(mode)) {
		fs32_add(sb, &ucg->cg_cs.cs_ndir, 1);
		fs32_add(sb, &usb1->fs_cstotal.cs_ndir, 1);
		fs32_add(sb, &sbi->fs_cs(cg).cs_ndir, 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_wait_on_buffer (UCPI_UBH);
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;

	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;

	inode->i_ino = cg * uspi->s_ipg + bit;
	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	ufsi->i_flags = UFS_I(dir)->i_flags;
	ufsi->i_lastfrag = 0;
	ufsi->i_gen = 0;
	ufsi->i_shadow = 0;
	ufsi->i_osync = 0;
	ufsi->i_oeftflag = 0;
	memset(&ufsi->i_u1, 0, sizeof(ufsi->i_u1));

	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	unlock_super (sb);

	if (DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(-EDQUOT);
	}

	UFSD(("allocating inode %lu\n", inode->i_ino))
	UFSD(("EXIT\n"))
	return inode;

failed:
	unlock_super (sb);
	make_bad_inode(inode);
	iput (inode);
	UFSD(("EXIT (FAILED)\n"))
	return ERR_PTR(-ENOSPC);
}
Example #9
0
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
	unsigned swab;
	
	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	swab = sb->u.ufs_sb.s_swab;
	usb1 = ubh_get_usb_first(USPI_UBH);
	
	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
	
	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");
	
	lock_super(sb);
	
	cgno = ufs_dtog(fragment);
	bit = ufs_dtogd(fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}
		
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic (ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
		else ufs_error (sb, "ufs_free_fragments",
			"bit already cleared for fragment %u", i);
	}
	
	DQUOT_FREE_BLOCK (sb, inode, count);
	ADD_SWAB32(ucg->cg_cs.cs_nffree, count);
	ADD_SWAB32(usb1->fs_cstotal.cs_nffree, count);
	ADD_SWAB32(sb->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
		SUB_SWAB32(ucg->cg_cs.cs_nffree, uspi->s_fpb);
		SUB_SWAB32(usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
		SUB_SWAB32(sb->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		INC_SWAB32(ucg->cg_cs.cs_nbfree);
		INC_SWAB32(usb1->fs_cstotal.cs_nbfree);
		INC_SWAB32(sb->fs_cs(cgno).cs_nbfree);
		cylno = ufs_cbtocylno (bbase);
		INC_SWAB16(ubh_cg_blks (ucpi, cylno, ufs_cbtorpos(bbase)));
		INC_SWAB32(ubh_cg_blktot (ucpi, cylno));
	}
	
	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;
	
	unlock_super (sb);
	UFSD(("EXIT\n"))
	return;

failed:
	unlock_super (sb);
	UFSD(("EXIT (FAILED)\n"))
	return;
}
Example #10
0
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
	unsigned swab;
	
	sb = inode->i_sb;
	uspi = sb->u.ufs_sb.s_uspi;
	swab = sb->u.ufs_sb.s_swab;
	usb1 = ubh_get_usb_first(USPI_UBH);

	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
	
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			"fragment %u, count %u\n", fragment, count);
		goto failed;
	}

	lock_super(sb);
	
do_more:
	overflow = 0;
	cgno = ufs_dtog (fragment);
	bit = ufs_dtogd (fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi) 
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic (ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
		if ((sb->u.ufs_sb.s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(sb, inode, uspi->s_fpb);
		INC_SWAB32(ucg->cg_cs.cs_nbfree);
		INC_SWAB32(usb1->fs_cstotal.cs_nbfree);
		INC_SWAB32(sb->fs_cs(cgno).cs_nbfree);
		cylno = ufs_cbtocylno(i);
		INC_SWAB16(ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)));
		INC_SWAB32(ubh_cg_blktot(ucpi, cylno));
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD(("EXIT\n"))
	return;

failed:
	unlock_super (sb);
	UFSD(("EXIT (FAILED)\n"))
	return;
}
Example #11
0
/*
 * Read on-disk structures associated with cylinder groups
 */
static int ufs_read_cylinder_structures (struct super_block *sb) {
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block *usb;
	struct ufs_buffer_head * ubh;
	unsigned char * base, * space;
	unsigned size, blks, i;
	unsigned flags = 0;
	
	UFSD(("ENTER\n"))
	
	uspi = sbi->s_uspi;

	usb  = (struct ufs_super_block *)
		((struct ufs_buffer_head *)uspi)->bh[0]->b_data;

	flags = UFS_SB(sb)->s_flags;
	
	/*
	 * Read cs structures from (usually) first data block
	 * on the device. 
	 */
	size = uspi->s_cssize;
	blks = (size + uspi->s_fsize - 1) >> uspi->s_fshift;
	base = space = kmalloc(size, GFP_KERNEL);
	if (!base)
		goto failed; 
	for (i = 0; i < blks; i += uspi->s_fpb) {
		size = uspi->s_bsize;
		if (i + uspi->s_fpb > blks)
			size = (blks - i) * uspi->s_fsize;

		if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
			ubh = ubh_bread(sb,
				fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_csaddr) + i, size);
			if (!ubh)
				goto failed;
			ubh_ubhcpymem (space, ubh, size);
			sbi->s_csp[ufs_fragstoblks(i)]=(struct ufs_csum *)space;
		}
		else {
			ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
			if (!ubh)
				goto failed;
			ubh_ubhcpymem(space, ubh, size);
			sbi->s_csp[ufs_fragstoblks(i)]=(struct ufs_csum *)space;
		}
		space += size;
		ubh_brelse (ubh);
		ubh = NULL;
	}

	/*
	 * Read cylinder group (we read only first fragment from block
	 * at this time) and prepare internal data structures for cg caching.
	 */
	if (!(sbi->s_ucg = kmalloc (sizeof(struct buffer_head *) * uspi->s_ncg, GFP_KERNEL)))
		goto failed;
	for (i = 0; i < uspi->s_ncg; i++) 
		sbi->s_ucg[i] = NULL;
	for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) {
		sbi->s_ucpi[i] = NULL;
		sbi->s_cgno[i] = UFS_CGNO_EMPTY;
	}
	for (i = 0; i < uspi->s_ncg; i++) {
		UFSD(("read cg %u\n", i))
		if (!(sbi->s_ucg[i] = sb_bread(sb, ufs_cgcmin(i))))
			goto failed;
		if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data))
			goto failed;
#ifdef UFS_SUPER_DEBUG_MORE
		ufs_print_cylinder_stuff(sb, (struct ufs_cylinder_group *) sbi->s_ucg[i]->b_data);
#endif
	}
	for (i = 0; i < UFS_MAX_GROUP_LOADED; i++) {
		if (!(sbi->s_ucpi[i] = kmalloc (sizeof(struct ufs_cg_private_info), GFP_KERNEL)))
			goto failed;
		sbi->s_cgno[i] = UFS_CGNO_EMPTY;
	}
	sbi->s_cg_loaded = 0;
	UFSD(("EXIT\n"))
	return 1;

failed:
	if (base) kfree (base);
	if (sbi->s_ucg) {
		for (i = 0; i < uspi->s_ncg; i++)
			if (sbi->s_ucg[i]) brelse (sbi->s_ucg[i]);
		kfree (sbi->s_ucg);
		for (i = 0; i < UFS_MAX_GROUP_LOADED; i++)
			if (sbi->s_ucpi[i]) kfree (sbi->s_ucpi[i]);
	}
	UFSD(("EXIT (FAILED)\n"))
	return 0;
}