Example #1
/*
 * Map the file-relative block number 'block' to a disk block by walking
 * the extent entries.  Returns 0 if the block is not mapped; otherwise
 * returns the disk block and sets *left to the number of blocks remaining
 * in the matching extent.
 */
static sector_t find_block(struct inode *inode, struct omfs_extent_entry *ent,
			sector_t block, int count, int *left)
{
	/* count > 1 because of terminator */
	sector_t searched = 0;
	for (; count > 1; count--) {
		int numblocks = clus_to_blk(OMFS_SB(inode->i_sb),
			be64_to_cpu(ent->e_blocks));

		if (block >= searched  &&
		    block < searched + numblocks) {
			/*
			 * found it at cluster + (block - searched)
			 * numblocks - (block - searched) is remainder
			 */
			*left = numblocks - (block - searched);
			return clus_to_blk(OMFS_SB(inode->i_sb),
				be64_to_cpu(ent->e_cluster)) +
				block - searched;
		}
		searched += numblocks;
		ent++;
	}
	return 0;
}
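A minimal userspace sketch of the same extent-walk arithmetic, with illustrative names and plain integers in place of the on-disk big-endian entries (and without the clus_to_blk conversion):

#include <stdint.h>

struct ext_sketch {
	uint64_t cluster;	/* first disk block of the run */
	uint64_t nblocks;	/* length of the run in blocks */
};

/* Return the disk block backing logical block 'blk', or 0 if unmapped;
 * *left gets the number of blocks remaining in the matched run. */
uint64_t find_block_sketch(const struct ext_sketch *e, int count,
			   uint64_t blk, uint64_t *left)
{
	uint64_t searched = 0;

	for (; count > 1; count--, e++) {	/* last entry is the terminator */
		if (blk >= searched && blk < searched + e->nblocks) {
			*left = e->nblocks - (blk - searched);
			return e->cluster + (blk - searched);
		}
		searched += e->nblocks;
	}
	return 0;
}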
Example #2
/*
 * Tries to allocate exactly one block.  Returns true if successful.
 */
int omfs_allocate_block(struct super_block *sb, u64 block)
{
	struct buffer_head *bh;
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int bits_per_entry = 8 * sb->s_blocksize;
	unsigned int map, bit;
	int ret = 0;
	u64 tmp;

	tmp = block;
	bit = do_div(tmp, bits_per_entry);
	map = tmp;

	mutex_lock(&sbi->s_bitmap_lock);
	if (map >= sbi->s_imap_size || test_and_set_bit(bit, sbi->s_imap[map]))
		goto out;

	if (sbi->s_bitmap_ino > 0) {
		bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
		if (!bh)
			goto out;

		set_bit(bit, (unsigned long *)bh->b_data);
		mark_buffer_dirty(bh);
		brelse(bh);
	}
	ret = 1;
out:
	mutex_unlock(&sbi->s_bitmap_lock);
	return ret;
}
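The map/bit split above is just a division: the block number divided by the number of bits held in one bitmap block gives the chunk index, and the remainder gives the bit within that chunk. A small standalone illustration (the 2048-byte block size is an assumption for the example):

#include <stdint.h>
#include <stdio.h>

static void split_block(uint64_t block, unsigned int blocksize,
			unsigned int *map, unsigned int *bit)
{
	unsigned int bits_per_entry = 8 * blocksize;	/* bits per bitmap block */

	*map = block / bits_per_entry;	/* which bitmap chunk */
	*bit = block % bits_per_entry;	/* which bit inside it */
}

int main(void)
{
	unsigned int map, bit;

	split_block(20000, 2048, &map, &bit);	/* 2048-byte blocks => 16384 bits/chunk */
	printf("map=%u bit=%u\n", map, bit);	/* prints map=1 bit=3616 */
	return 0;
}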
Example #3
static void omfs_put_super(struct super_block *sb)
{
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	unsigned int i;

	/* free the per-blocksize bitmap chunks as well as the pointer array */
	for (i = 0; i < sbi->s_imap_size; i++)
		kfree(sbi->s_imap[i]);
	kfree(sbi->s_imap);
	kfree(sbi);
	sb->s_fs_info = NULL;
}
Example #4
/* Bounds-checked read of OMFS block 'block'; returns NULL past the end. */
struct buffer_head *omfs_bread(struct super_block *sb, sector_t block)
{
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	if (block >= sbi->s_num_blocks)
		return NULL;

	return sb_bread(sb, clus_to_blk(sbi, block));
}
Example #5
/*
 * For Rio Karma, there is an on-disk free bitmap whose location is
 * stored in the root block.  For ReplayTV, there is no such free bitmap
 * so we have to walk the tree.  Both inodes and file data are allocated
 * from the same map.  This array can be big (300k) so we allocate
 * in units of the blocksize.
 */
static int omfs_get_imap(struct super_block *sb)
{
	int bitmap_size;
	int array_size;
	int count;
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	struct buffer_head *bh;
	unsigned long **ptr;
	sector_t block;

	bitmap_size = DIV_ROUND_UP(sbi->s_num_blocks, 8);
	array_size = DIV_ROUND_UP(bitmap_size, sb->s_blocksize);

	if (sbi->s_bitmap_ino == ~0ULL)
		goto out;

	sbi->s_imap_size = array_size;
	sbi->s_imap = kzalloc(array_size * sizeof(unsigned long *), GFP_KERNEL);
	if (!sbi->s_imap)
		goto nomem;

	block = clus_to_blk(sbi, sbi->s_bitmap_ino);
	if (block >= sbi->s_num_blocks)
		goto nomem;

	ptr = sbi->s_imap;
	for (count = bitmap_size; count > 0; count -= sb->s_blocksize) {
		bh = sb_bread(sb, block++);
		if (!bh)
			goto nomem_free;
		*ptr = kmalloc(sb->s_blocksize, GFP_KERNEL);
		if (!*ptr) {
			brelse(bh);
			goto nomem_free;
		}
		memcpy(*ptr, bh->b_data, sb->s_blocksize);
		if (count < sb->s_blocksize)
			memset((void *)*ptr + count, 0xff,
				sb->s_blocksize - count);
		brelse(bh);
		ptr++;
	}
out:
	return 0;

nomem_free:
	for (count = 0; count < array_size; count++)
		kfree(sbi->s_imap[count]);

	kfree(sbi->s_imap);
nomem:
	sbi->s_imap = NULL;
	sbi->s_imap_size = 0;
	return -ENOMEM;
}
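Two details worth calling out: the bitmap is loaded in blocksize-sized chunks, and the unused tail of the last chunk is filled with 0xff so that blocks past the end of the device always appear allocated. A quick arithmetic check of the sizes involved (the volume and block sizes here are made up, roughly matching the "300k" note above):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t num_blocks = 2440000;	/* hypothetical volume size in blocks */
	uint64_t blocksize = 2048;	/* hypothetical filesystem block size */

	uint64_t bitmap_size = DIV_ROUND_UP(num_blocks, 8);	      /* bytes of bitmap */
	uint64_t array_size = DIV_ROUND_UP(bitmap_size, blocksize);  /* s_imap[] chunks */
	uint64_t tail = bitmap_size - (array_size - 1) * blocksize;  /* bytes in last chunk */

	printf("%llu bitmap bytes in %llu chunks\n",
	       (unsigned long long)bitmap_size, (unsigned long long)array_size);
	printf("last chunk: %llu data bytes, %llu bytes padded with 0xff\n",
	       (unsigned long long)tail, (unsigned long long)(blocksize - tail));
	return 0;
}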
Example #6
unsigned long omfs_count_free(struct super_block *sb)
{
	unsigned int i;
	unsigned long sum = 0;
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int nbits = sb->s_blocksize * 8;

	for (i = 0; i < sbi->s_imap_size; i++)
		sum += nbits - bitmap_weight(sbi->s_imap[i], nbits);

	return sum;
}
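For reference, the same count can be reproduced in userspace with a popcount: free blocks are the total bits minus the set bits, which is what bitmap_weight() computes per chunk. A sketch (uses the GCC/Clang __builtin_popcountl; nbits is assumed to be a multiple of the word size, which holds since nbits = blocksize * 8):

#include <stdio.h>

static unsigned long count_free_chunk(const unsigned long *chunk, unsigned int nbits)
{
	unsigned long used = 0;
	unsigned int i;

	for (i = 0; i < nbits / (8 * sizeof(unsigned long)); i++)
		used += (unsigned long)__builtin_popcountl(chunk[i]);

	return nbits - used;	/* free = total - in use */
}

int main(void)
{
	unsigned long chunk[2] = { 0xffUL, 0x1UL };	/* 9 bits set */

	printf("%lu free\n",
	       count_free_chunk(chunk, 2 * 8 * sizeof(unsigned long)));
	return 0;
}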
Example #7
static int omfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *s = dentry->d_sb;
	struct omfs_sb_info *sbi = OMFS_SB(s);
	buf->f_type = OMFS_MAGIC;
	buf->f_bsize = sbi->s_blocksize;
	buf->f_blocks = sbi->s_num_blocks;
	buf->f_files = sbi->s_num_blocks;
	buf->f_namelen = OMFS_NAMELEN;

	buf->f_bfree = buf->f_bavail = buf->f_ffree =
		omfs_count_free(s);
	return 0;
}
Example #8
/*
 * Allocate a new on-disk inode (sbi->s_mirrors contiguous blocks) and
 * initialize the corresponding in-core inode.
 */
struct inode *omfs_new_inode(struct inode *dir, int mode)
{
	struct inode *inode;
	u64 new_block;
	int err;
	int len;
	struct omfs_sb_info *sbi = OMFS_SB(dir->i_sb);

	inode = new_inode(dir->i_sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	err = omfs_allocate_range(dir->i_sb, sbi->s_mirrors, sbi->s_mirrors,
			&new_block, &len);
	if (err)
		goto fail;

	inode->i_ino = new_block;
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mapping->a_ops = &omfs_aops;

	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	switch (mode & S_IFMT) {
	case S_IFDIR:
		inode->i_op = &omfs_dir_inops;
		inode->i_fop = &omfs_dir_operations;
		inode->i_size = sbi->s_sys_blocksize;
		inc_nlink(inode);
		break;
	case S_IFREG:
		inode->i_op = &omfs_file_inops;
		inode->i_fop = &omfs_file_operations;
		inode->i_size = 0;
		break;
	}

	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	return inode;
fail:
	make_bad_inode(inode);
	iput(inode);
	return ERR_PTR(err);
}
Example #9
static int omfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *s = dentry->d_sb;
	struct omfs_sb_info *sbi = OMFS_SB(s);
	u64 id = huge_encode_dev(s->s_bdev->bd_dev);

	buf->f_type = OMFS_MAGIC;
	buf->f_bsize = sbi->s_blocksize;
	buf->f_blocks = sbi->s_num_blocks;
	buf->f_files = sbi->s_num_blocks;
	buf->f_namelen = OMFS_NAMELEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	buf->f_bfree = buf->f_bavail = buf->f_ffree =
		omfs_count_free(s);

	return 0;
}
Example #10
/*
 * Clears count bits starting at a given block.
 */
int omfs_clear_range(struct super_block *sb, u64 block, int count)
{
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int bits_per_entry = 8 * sb->s_blocksize;
	u64 tmp;
	unsigned int map, bit;
	int ret;

	tmp = block;
	bit = do_div(tmp, bits_per_entry);
	map = tmp;

	if (map >= sbi->s_imap_size)
		return 0;

	mutex_lock(&sbi->s_bitmap_lock);
	ret = set_run(sb, map, bits_per_entry, bit, count, 0);
	mutex_unlock(&sbi->s_bitmap_lock);
	return ret;
}
Example #11
/*
 *  Tries to allocate a set of blocks.	The request size depends on the
 *  type: for inodes, we must allocate sbi->s_mirrors blocks, and for file
 *  blocks, we try to allocate sbi->s_clustersize, but can always get away
 *  with just one block.
 */
int omfs_allocate_range(struct super_block *sb,
			int min_request,
			int max_request,
			u64 *return_block,
			int *return_size)
{
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int bits_per_entry = 8 * sb->s_blocksize;
	int ret = 0;
	int i, run, bit;

	mutex_lock(&sbi->s_bitmap_lock);
	for (i = 0; i < sbi->s_imap_size; i++) {
		bit = 0;
		while (bit < bits_per_entry) {
			bit = find_next_zero_bit(sbi->s_imap[i], bits_per_entry,
				bit);

			if (bit == bits_per_entry)
				break;

			run = count_run(&sbi->s_imap[i], bits_per_entry,
				sbi->s_imap_size-i, bit, max_request);

			if (run >= min_request)
				goto found;
			bit += run;
		}
	}
	ret = -ENOSPC;
	goto out;

found:
	*return_block = i * bits_per_entry + bit;
	*return_size = run;
	ret = set_run(sb, i, bits_per_entry, bit, run, 1);

out:
	mutex_unlock(&sbi->s_bitmap_lock);
	return ret;
}
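The scan above is a first-fit search: find the next zero bit, measure the run of zeros (count_run, capped at max_request and allowed to continue into the following chunk), and take it if it meets min_request. A simplified single-chunk version in plain C (bit numbering here is simply "bit i of word i/WORD_BITS", roughly the little-endian layout; the _sketch names are illustrative):

#include <stdio.h>

#define WORD_BITS	(8 * sizeof(unsigned long))

/* Return the start of a run of >= min (and <= max) zero bits, or -1. */
static int find_run_sketch(const unsigned long *map, int nbits,
			   int min, int max, int *run_len)
{
	int bit = 0;

	while (bit < nbits) {
		int start, run;

		/* skip over allocated (set) bits */
		while (bit < nbits &&
		       ((map[bit / WORD_BITS] >> (bit % WORD_BITS)) & 1))
			bit++;
		if (bit == nbits)
			break;

		/* measure the free run, capped at max */
		start = bit;
		run = 0;
		while (bit < nbits && run < max &&
		       !((map[bit / WORD_BITS] >> (bit % WORD_BITS)) & 1)) {
			bit++;
			run++;
		}
		if (run >= min) {
			*run_len = run;
			return start;
		}
	}
	return -1;
}

int main(void)
{
	unsigned long map[1] = { 0x0f0fUL };	/* bits 0-3 and 8-11 allocated */
	int len;
	int start = find_run_sketch(map, WORD_BITS, 2, 8, &len);

	printf("run of %d free bits at bit %d\n", len, start);	/* 4 at 4 */
	return 0;
}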
Example #12
/*
 * Sets or clears a run of 'count' bits starting at 'bit'.
 * Called with the bitmap lock held.
 */
static int set_run(struct super_block *sb, int map,
		int nbits, int bit, int count, int set)
{
	int i;
	int err;
	struct buffer_head *bh;
	struct omfs_sb_info *sbi = OMFS_SB(sb);

	err = -ENOMEM;
	bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
	if (!bh)
		goto out;

	for (i = 0; i < count; i++, bit++) {
		if (bit >= nbits) {
			bit = 0;
			map++;

			mark_buffer_dirty(bh);
			brelse(bh);
			bh = sb_bread(sb,
				clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
			if (!bh)
				goto out;
		}
		if (set) {
			set_bit(bit, sbi->s_imap[map]);
			set_bit(bit, (unsigned long *)bh->b_data);
		} else {
			clear_bit(bit, sbi->s_imap[map]);
			clear_bit(bit, (unsigned long *)bh->b_data);
		}
	}
	mark_buffer_dirty(bh);
	brelse(bh);
	err = 0;
out:
	return err;
}
Example #13
/*
 * Display the mount options in /proc/mounts.
 */
static int omfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct omfs_sb_info *sbi = OMFS_SB(root->d_sb);
	umode_t cur_umask = current_umask();

	if (!uid_eq(sbi->s_uid, current_uid()))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbi->s_uid));
	if (!gid_eq(sbi->s_gid, current_gid()))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbi->s_gid));

	if (sbi->s_dmask == sbi->s_fmask) {
		if (sbi->s_fmask != cur_umask)
			seq_printf(m, ",umask=%o", sbi->s_fmask);
	} else {
		if (sbi->s_dmask != cur_umask)
			seq_printf(m, ",dmask=%o", sbi->s_dmask);
		if (sbi->s_fmask != cur_umask)
			seq_printf(m, ",fmask=%o", sbi->s_fmask);
	}

	return 0;
}
Example #14
static int omfs_grow_extent(struct inode *inode, struct omfs_extent *oe,
			u64 *ret_block)
{
	struct omfs_extent_entry *terminator;
	struct omfs_extent_entry *entry = &oe->e_entry;
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	u32 extent_count = be32_to_cpu(oe->e_extent_count);
	u64 new_block = 0;
	u32 max_count;
	int new_count;
	int ret = 0;

	/* Reached the end of the extent table with no blocks mapped.
	 * There are three possibilities for adding space: grow the last
	 * extent, add a new extent to the current extent table, or add
	 * a continuation inode.  In the last two cases we need an
	 * allocation of sbi->s_clustersize blocks.
	 */

	/* TODO: handle holes */

	/* should always have a terminator */
	if (extent_count < 1)
		return -EIO;

	/* trivially grow current extent, if next block is not taken */
	terminator = entry + extent_count - 1;
	if (extent_count > 1) {
		entry = terminator-1;
		new_block = be64_to_cpu(entry->e_cluster) +
			be64_to_cpu(entry->e_blocks);

		if (omfs_allocate_block(inode->i_sb, new_block)) {
			entry->e_blocks =
				cpu_to_be64(be64_to_cpu(entry->e_blocks) + 1);
			terminator->e_blocks = ~(cpu_to_be64(
				be64_to_cpu(~terminator->e_blocks) + 1));
			goto out;
		}
	}
	max_count = omfs_max_extents(sbi, OMFS_EXTENT_START);

	/* TODO: add a continuation block here */
	if (be32_to_cpu(oe->e_extent_count) > max_count-1)
		return -EIO;

	/* try to allocate a new cluster */
	ret = omfs_allocate_range(inode->i_sb, 1, sbi->s_clustersize,
		&new_block, &new_count);
	if (ret)
		goto out_fail;

	/* copy terminator down an entry */
	entry = terminator;
	terminator++;
	memcpy(terminator, entry, sizeof(struct omfs_extent_entry));

	entry->e_cluster = cpu_to_be64(new_block);
	entry->e_blocks = cpu_to_be64((u64) new_count);

	terminator->e_blocks = ~(cpu_to_be64(
		be64_to_cpu(~terminator->e_blocks) + (u64) new_count));

	/* write in new entry */
	oe->e_extent_count = cpu_to_be32(1 + be32_to_cpu(oe->e_extent_count));

out:
	*ret_block = new_block;
out_fail:
	return ret;
}
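The two complement dances above make sense if the terminator's e_blocks holds the bitwise complement of the extent's total block count (a consistency check kept alongside the real entries); growing the extent by n blocks is then complement, add n, complement again. That reading is inferred from the updates above rather than quoted from a format spec; a tiny check of the arithmetic, ignoring the big-endian conversions:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t total = 16;		/* blocks currently covered by the extent */
	uint64_t term = ~total;		/* what the terminator would store */
	uint64_t n = 4;			/* blocks being added */

	term = ~(~term + n);		/* same shape as the kernel update */
	assert(~term == total + n);	/* terminator now accounts for 20 blocks */
	return 0;
}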
Example #15
/*
 * get_block callback for the generic block I/O helpers: map logical file
 * block 'block' to a disk block, walking (and, if 'create' is set,
 * growing) the inode's extent tables.
 */
static int omfs_get_block(struct inode *inode, sector_t block,
			  struct buffer_head *bh_result, int create)
{
	struct buffer_head *bh;
	sector_t next, offset;
	int ret;
	u64 new_block;
	u32 max_extents;
	int extent_count;
	struct omfs_extent *oe;
	struct omfs_extent_entry *entry;
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	int max_blocks = bh_result->b_size >> inode->i_blkbits;
	int remain;

	ret = -EIO;
	bh = sb_bread(inode->i_sb, clus_to_blk(sbi, inode->i_ino));
	if (!bh)
		goto out;

	oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
	max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);
	next = inode->i_ino;

	for (;;) {

		if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next))
			goto out_brelse;

		extent_count = be32_to_cpu(oe->e_extent_count);
		next = be64_to_cpu(oe->e_next);
		entry = &oe->e_entry;

		if (extent_count > max_extents)
			goto out_brelse;

		offset = find_block(inode, entry, block, extent_count, &remain);
		if (offset > 0) {
			ret = 0;
			map_bh(bh_result, inode->i_sb, offset);
			if (remain > max_blocks)
				remain = max_blocks;
			bh_result->b_size = (remain << inode->i_blkbits);
			goto out_brelse;
		}
		if (next == ~0)
			break;

		brelse(bh);
		bh = sb_bread(inode->i_sb, clus_to_blk(sbi, next));
		if (!bh)
			goto out;
		oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
		max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
	}
	if (create) {
		ret = omfs_grow_extent(inode, oe, &new_block);
		if (ret == 0) {
			mark_buffer_dirty(bh);
			mark_inode_dirty(inode);
			map_bh(bh_result, inode->i_sb,
					clus_to_blk(sbi, new_block));
		}
	}
out_brelse:
	brelse(bh);
out:
	return ret;
}
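For context, a get_block callback of this shape is normally handed to the generic buffer-head page helpers by the filesystem's address_space operations. A hedged sketch of that wiring (the _sketch functions are illustrative, not the actual omfs_aops entries defined elsewhere in the driver):

#include <linux/fs.h>
#include <linux/buffer_head.h>

static int omfs_readpage_sketch(struct file *file, struct page *page)
{
	/* read one page, resolving blocks through omfs_get_block() */
	return block_read_full_page(page, omfs_get_block);
}

static int omfs_writepage_sketch(struct page *page, struct writeback_control *wbc)
{
	/* write one page, again resolving blocks via omfs_get_block() */
	return block_write_full_page(page, omfs_get_block, wbc);
}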
Example #16
/* Write the in-core inode back to its on-disk block and any mirror blocks. */
static int __omfs_write_inode(struct inode *inode, int wait)
{
	struct omfs_inode *oi;
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	struct buffer_head *bh, *bh2;
	u64 ctime;
	int i;
	int ret = -EIO;
	int sync_failed = 0;

	/* get current inode since we may have written sibling ptrs etc. */
	bh = omfs_bread(inode->i_sb, inode->i_ino);
	if (!bh)
		goto out;

	oi = (struct omfs_inode *) bh->b_data;

	oi->i_head.h_self = cpu_to_be64(inode->i_ino);
	if (S_ISDIR(inode->i_mode))
		oi->i_type = OMFS_DIR;
	else if (S_ISREG(inode->i_mode))
		oi->i_type = OMFS_FILE;
	else {
		printk(KERN_WARNING "omfs: unknown file type: %d\n",
			inode->i_mode);
		goto out_brelse;
	}

	oi->i_head.h_body_size = cpu_to_be32(sbi->s_sys_blocksize -
		sizeof(struct omfs_header));
	oi->i_head.h_version = 1;
	oi->i_head.h_type = OMFS_INODE_NORMAL;
	oi->i_head.h_magic = OMFS_IMAGIC;
	oi->i_size = cpu_to_be64(inode->i_size);

	ctime = inode->i_ctime.tv_sec * 1000LL +
		((inode->i_ctime.tv_nsec + 999)/1000);
	oi->i_ctime = cpu_to_be64(ctime);

	omfs_update_checksums(oi);

	mark_buffer_dirty(bh);
	if (wait) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh))
			sync_failed = 1;
	}

	/* if mirroring writes, copy to next fsblock */
	for (i = 1; i < sbi->s_mirrors; i++) {
		bh2 = omfs_bread(inode->i_sb, inode->i_ino + i);
		if (!bh2)
			goto out_brelse;

		memcpy(bh2->b_data, bh->b_data, bh->b_size);
		mark_buffer_dirty(bh2);
		if (wait) {
			sync_dirty_buffer(bh2);
			if (buffer_req(bh2) && !buffer_uptodate(bh2))
				sync_failed = 1;
		}
		brelse(bh2);
	}
	ret = (sync_failed) ? -EIO : 0;
out_brelse:
	brelse(bh);
out:
	return ret;
}
Example #17
int omfs_shrink_inode(struct inode *inode)
{
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	struct omfs_extent *oe;
	struct omfs_extent_entry *entry;
	struct buffer_head *bh;
	u64 next, last;
	u32 extent_count;
	u32 max_extents;
	int ret;

	/* traverse the extent table, freeing every block that lies
	 * beyond inode->i_size
	 */
	next = inode->i_ino;

	/* only support truncate -> 0 for now */
	ret = -EIO;
	if (inode->i_size != 0)
		goto out;

	bh = sb_bread(inode->i_sb, clus_to_blk(sbi, next));
	if (!bh)
		goto out;

	oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
	max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);

	for (;;) {

		if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next))
			goto out_brelse;

		extent_count = be32_to_cpu(oe->e_extent_count);

		if (extent_count > max_extents)
			goto out_brelse;

		last = next;
		next = be64_to_cpu(oe->e_next);
		entry = &oe->e_entry;

		/* ignore last entry as it is the terminator */
		for (; extent_count > 1; extent_count--) {
			u64 start, count;
			start = be64_to_cpu(entry->e_cluster);
			count = be64_to_cpu(entry->e_blocks);

			omfs_clear_range(inode->i_sb, start, (int) count);
			entry++;
		}
		omfs_make_empty_table(bh, (char *) oe - bh->b_data);
		mark_buffer_dirty(bh);
		brelse(bh);

		if (last != inode->i_ino)
			omfs_clear_range(inode->i_sb, last, sbi->s_mirrors);

		if (next == ~0)
			break;

		bh = sb_bread(inode->i_sb, clus_to_blk(sbi, next));
		if (!bh)
			goto out;
		oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
		max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
	}
	ret = 0;
out:
	return ret;
out_brelse:
	brelse(bh);
	return ret;
}
Example #18
/*
 * Look up (or read from disk) the inode with number 'ino' and return the
 * corresponding in-core inode.
 */
struct inode *omfs_iget(struct super_block *sb, ino_t ino)
{
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	struct omfs_inode *oi;
	struct buffer_head *bh;
	u64 ctime;
	unsigned long nsecs;
	struct inode *inode;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	bh = omfs_bread(inode->i_sb, ino);
	if (!bh)
		goto iget_failed;

	oi = (struct omfs_inode *)bh->b_data;

	/* check self */
	if (ino != be64_to_cpu(oi->i_head.h_self))
		goto fail_bh;

	inode->i_uid = sbi->s_uid;
	inode->i_gid = sbi->s_gid;

	ctime = be64_to_cpu(oi->i_ctime);
	nsecs = do_div(ctime, 1000) * 1000L;

	inode->i_atime.tv_sec = ctime;
	inode->i_mtime.tv_sec = ctime;
	inode->i_ctime.tv_sec = ctime;
	inode->i_atime.tv_nsec = nsecs;
	inode->i_mtime.tv_nsec = nsecs;
	inode->i_ctime.tv_nsec = nsecs;

	inode->i_mapping->a_ops = &omfs_aops;

	switch (oi->i_type) {
	case OMFS_DIR:
		inode->i_mode = S_IFDIR | (S_IRWXUGO & ~sbi->s_dmask);
		inode->i_op = &omfs_dir_inops;
		inode->i_fop = &omfs_dir_operations;
		inode->i_size = sbi->s_sys_blocksize;
		inc_nlink(inode);
		break;
	case OMFS_FILE:
		inode->i_mode = S_IFREG | (S_IRWXUGO & ~sbi->s_fmask);
		inode->i_fop = &omfs_file_operations;
		inode->i_size = be64_to_cpu(oi->i_size);
		break;
	}
	brelse(bh);
	unlock_new_inode(inode);
	return inode;
fail_bh:
	brelse(bh);
iget_failed:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}