Example #1
0
struct inode *omfs_new_inode(struct inode *dir, int mode)
{
	struct inode *inode;
	u64 new_block;
	int err;
	int len;
	struct omfs_sb_info *sbi = OMFS_SB(dir->i_sb);

	inode = new_inode(dir->i_sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	/* reserve exactly s_mirrors contiguous blocks: the inode plus its mirror copies */
	err = omfs_allocate_range(dir->i_sb, sbi->s_mirrors, sbi->s_mirrors,
			&new_block, &len);
	if (err)
		goto fail;

	/* the first allocated block number doubles as the inode number */
	inode->i_ino = new_block;
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mapping->a_ops = &omfs_aops;

	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
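	/* wire up directory- or regular-file-specific operations */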
	switch (mode & S_IFMT) {
	case S_IFDIR:
		inode->i_op = &omfs_dir_inops;
		inode->i_fop = &omfs_dir_operations;
		inode->i_size = sbi->s_sys_blocksize;
		inc_nlink(inode);
		break;
	case S_IFREG:
		inode->i_op = &omfs_file_inops;
		inode->i_fop = &omfs_file_operations;
		inode->i_size = 0;
		break;
	}

	/* hash the inode for lookup and mark it dirty so it gets written out */
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	return inode;
fail:
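	/* allocation failed: mark the half-initialized inode bad before releasing it */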
	make_bad_inode(inode);
	iput(inode);
	return ERR_PTR(err);
}
Example #2
0
static int omfs_grow_extent(struct inode *inode, struct omfs_extent *oe,
			u64 *ret_block)
{
	struct omfs_extent_entry *terminator;
	struct omfs_extent_entry *entry = &oe->e_entry;
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	u32 extent_count = be32_to_cpu(oe->e_extent_count);
	u64 new_block = 0;
	u32 max_count;
	int new_count;
	int ret = 0;

	/* We have reached the end of the extent table with no blocks mapped.
	 * There are three ways to add space: grow the last extent, add a new
	 * extent to the current extent table, or add a continuation inode.
	 * The last two cases require allocating sbi->s_clustersize blocks.
	 */

	/* TODO: handle holes */

	/* should always have a terminator */
	if (extent_count < 1)
		return -EIO;

	/* trivially grow current extent, if next block is not taken */
	terminator = entry + extent_count - 1;
	if (extent_count > 1) {
		entry = terminator-1;
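		/* the candidate block sits immediately after the last mapped extent */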
		new_block = be64_to_cpu(entry->e_cluster) +
			be64_to_cpu(entry->e_blocks);

		if (omfs_allocate_block(inode->i_sb, new_block)) {
			entry->e_blocks =
				cpu_to_be64(be64_to_cpu(entry->e_blocks) + 1);
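			/* the terminator stores the bitwise-NOT of the running block count; bump it too */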
			terminator->e_blocks = ~(cpu_to_be64(
				be64_to_cpu(~terminator->e_blocks) + 1));
			goto out;
		}
	}
	max_count = omfs_max_extents(sbi, OMFS_EXTENT_START);

	/* TODO: add a continuation block here */
	if (be32_to_cpu(oe->e_extent_count) > max_count-1)
		return -EIO;

	/* try to allocate a new cluster of up to sbi->s_clustersize blocks */
	ret = omfs_allocate_range(inode->i_sb, 1, sbi->s_clustersize,
		&new_block, &new_count);
	if (ret)
		goto out_fail;

	/* copy terminator down an entry */
	entry = terminator;
	terminator++;
	memcpy(terminator, entry, sizeof(struct omfs_extent_entry));

	/* the old terminator slot now describes the newly allocated cluster */
	entry->e_cluster = cpu_to_be64(new_block);
	entry->e_blocks = cpu_to_be64((u64) new_count);

	/* extend the terminator's inverted block count by the size of the new cluster */
	terminator->e_blocks = ~(cpu_to_be64(
		be64_to_cpu(~terminator->e_blocks) + (u64) new_count));

	/* write in new entry */
	oe->e_extent_count = cpu_to_be32(1 + be32_to_cpu(oe->e_extent_count));

out:
	*ret_block = new_block;
out_fail:
	return ret;
}