Example #1
File: inode.c Project: 3null/linux
struct inode *affs_iget(struct super_block *sb, unsigned long ino)
{
	struct affs_sb_info	*sbi = AFFS_SB(sb);
	struct buffer_head	*bh;
	struct affs_tail	*tail;
	struct inode		*inode;
	u32			 block;
	u32			 size;
	u32			 prot;
	u16			 id;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	pr_debug("affs_iget(%lu)\n", inode->i_ino);

	block = inode->i_ino;
	bh = affs_bread(sb, block);
	if (!bh) {
		affs_warning(sb, "read_inode", "Cannot read block %d", block);
		goto bad_inode;
	}
	if (affs_checksum_block(sb, bh) || be32_to_cpu(AFFS_HEAD(bh)->ptype) != T_SHORT) {
		affs_warning(sb,"read_inode",
			   "Checksum or type (ptype=%d) error on inode %d",
			   AFFS_HEAD(bh)->ptype, block);
		goto bad_inode;
	}

	tail = AFFS_TAIL(sb, bh);
	prot = be32_to_cpu(tail->protect);

	inode->i_size = 0;
	set_nlink(inode, 1);
	inode->i_mode = 0;
	AFFS_I(inode)->i_extcnt = 1;
	AFFS_I(inode)->i_ext_last = ~1;
	AFFS_I(inode)->i_protect = prot;
	atomic_set(&AFFS_I(inode)->i_opencnt, 0);
	AFFS_I(inode)->i_blkcnt = 0;
	AFFS_I(inode)->i_lc = NULL;
	AFFS_I(inode)->i_lc_size = 0;
	AFFS_I(inode)->i_lc_shift = 0;
	AFFS_I(inode)->i_lc_mask = 0;
	AFFS_I(inode)->i_ac = NULL;
	AFFS_I(inode)->i_ext_bh = NULL;
	AFFS_I(inode)->mmu_private = 0;
	AFFS_I(inode)->i_lastalloc = 0;
	AFFS_I(inode)->i_pa_cnt = 0;

	if (sbi->s_flags & SF_SETMODE)
		inode->i_mode = sbi->s_mode;
	else
		inode->i_mode = prot_to_mode(prot);

	id = be16_to_cpu(tail->uid);
	if (id == 0 || sbi->s_flags & SF_SETUID)
		inode->i_uid = sbi->s_uid;
	else if (id == 0xFFFF && sbi->s_flags & SF_MUFS)
		i_uid_write(inode, 0);
	else
		i_uid_write(inode, id);

	id = be16_to_cpu(tail->gid);
	if (id == 0 || sbi->s_flags & SF_SETGID)
		inode->i_gid = sbi->s_gid;
	else if (id == 0xFFFF && sbi->s_flags & SF_MUFS)
		i_gid_write(inode, 0);
	else
		i_gid_write(inode, id);

	switch (be32_to_cpu(tail->stype)) {
	case ST_ROOT:
		inode->i_uid = sbi->s_uid;
		inode->i_gid = sbi->s_gid;
		/* fall through */
	case ST_USERDIR:
		if (be32_to_cpu(tail->stype) == ST_USERDIR ||
		    sbi->s_flags & SF_SETMODE) {
			if (inode->i_mode & S_IRUSR)
				inode->i_mode |= S_IXUSR;
			if (inode->i_mode & S_IRGRP)
				inode->i_mode |= S_IXGRP;
			if (inode->i_mode & S_IROTH)
				inode->i_mode |= S_IXOTH;
			inode->i_mode |= S_IFDIR;
		} else
			inode->i_mode = S_IRUGO | S_IXUGO | S_IWUSR | S_IFDIR;
		/* Maybe it should be controlled by mount parameter? */
		//inode->i_mode |= S_ISVTX;
		inode->i_op = &affs_dir_inode_operations;
		inode->i_fop = &affs_dir_operations;
		break;
	case ST_LINKDIR:
#if 0
		affs_warning(sb, "read_inode", "inode is LINKDIR");
		goto bad_inode;
#else
		inode->i_mode |= S_IFDIR;
		/* ... and leave ->i_op and ->i_fop pointing to empty */
		break;
#endif
	case ST_LINKFILE:
		affs_warning(sb, "read_inode", "inode is LINKFILE");
		goto bad_inode;
	case ST_FILE:
		size = be32_to_cpu(tail->size);
		inode->i_mode |= S_IFREG;
		AFFS_I(inode)->mmu_private = inode->i_size = size;
		if (inode->i_size) {
			AFFS_I(inode)->i_blkcnt = (size - 1) /
					       sbi->s_data_blksize + 1;
			AFFS_I(inode)->i_extcnt = (AFFS_I(inode)->i_blkcnt - 1) /
					       sbi->s_hashsize + 1;
		}
		if (tail->link_chain)
			set_nlink(inode, 2);
		inode->i_mapping->a_ops = (sbi->s_flags & SF_OFS) ? &affs_aops_ofs : &affs_aops;
		inode->i_op = &affs_file_inode_operations;
		inode->i_fop = &affs_file_operations;
		break;
	case ST_SOFTLINK:
		inode->i_mode |= S_IFLNK;
		inode->i_op = &affs_symlink_inode_operations;
		inode->i_data.a_ops = &affs_symlink_aops;
		break;
	}

	inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec
		       = (be32_to_cpu(tail->change.days) * (24 * 60 * 60) +
		         be32_to_cpu(tail->change.mins) * 60 +
			 be32_to_cpu(tail->change.ticks) / 50 +
			 ((8 * 365 + 2) * 24 * 60 * 60)) +
			 sys_tz.tz_minuteswest * 60;
	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_atime.tv_nsec = 0;
	affs_brelse(bh);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	affs_brelse(bh);
	iget_failed(inode);
	return ERR_PTR(-EIO);
}
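The timestamp block near the end of affs_iget() converts an AmigaOS date stamp (days, minutes and 1/50-second ticks counted from 1978-01-01) into seconds since the Unix epoch; the constant (8 * 365 + 2) * 24 * 60 * 60 is the eight-year gap between the two epochs, including the 1972 and 1976 leap days. A minimal user-space sketch of the same arithmetic (the function and variable names below are made up for illustration):

#include <stdio.h>
#include <time.h>

/* Same conversion as the timestamp code in affs_iget(), outside the kernel:
 * days/minutes/ticks since 1978-01-01 become seconds since the Unix epoch.
 * The kernel additionally adds sys_tz.tz_minuteswest * 60, omitted here. */
static long long amiga_to_unix(unsigned int days, unsigned int mins,
			       unsigned int ticks)
{
	return (long long)days * 24 * 60 * 60 +
	       (long long)mins * 60 +
	       ticks / 50 +				/* 50 ticks per second */
	       (8 * 365 + 2) * 24 * 60 * 60;		/* 1970..1978 offset */
}

int main(void)
{
	time_t t = amiga_to_unix(0, 0, 0);		/* the Amiga epoch itself */

	printf("%s", asctime(gmtime(&t)));		/* Sun Jan  1 00:00:00 1978 */
	return 0;
}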
Example #2
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;

	/* Check if ino is within scope */
	if (check_nid_range(sbi, inode->i_ino)) {
		f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
			 (unsigned long) inode->i_ino);
		WARN_ON(1);
		return -EINVAL;
	}

	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = le64_to_cpu(ri->i_blocks);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);

	fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	if (f2fs_init_extent_tree(inode, &ri->i_ext))
		set_page_dirty(node_page);

	get_inline_info(inode, ri);

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (__written_first_block(ri))
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	if (!need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	return 0;
}
Example #3
void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
			  int create_ino)
{
	struct super_block *sb;
	struct ocfs2_super *osb;
	int use_plocks = 1;

	sb = inode->i_sb;
	osb = OCFS2_SB(sb);

	if ((osb->s_mount_opt & OCFS2_MOUNT_LOCALFLOCKS) ||
	    ocfs2_mount_local(osb) || !ocfs2_stack_supports_plocks())
		use_plocks = 0;

	/*
	 * These have all been checked by ocfs2_read_inode_block() or set
	 * by ocfs2_mknod_locked(), so a failure is a code bug.
	 */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));  /* This means that read_inode
						cannot create a superblock
						inode today.  change if
						that is needed. */
	BUG_ON(!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)));
	BUG_ON(le32_to_cpu(fe->i_fs_generation) != osb->fs_generation);


	OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
	OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features);

	inode_set_iversion(inode, 1);
	inode->i_generation = le32_to_cpu(fe->i_generation);
	inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
	inode->i_mode = le16_to_cpu(fe->i_mode);
	i_uid_write(inode, le32_to_cpu(fe->i_uid));
	i_gid_write(inode, le32_to_cpu(fe->i_gid));

	/* Fast symlinks will have i_size but no allocated clusters. */
	if (S_ISLNK(inode->i_mode) && !fe->i_clusters) {
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops;
	} else {
		inode->i_blocks = ocfs2_inode_sector_count(inode);
		inode->i_mapping->a_ops = &ocfs2_aops;
	}
	inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
	inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
	inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
	inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
	inode->i_ctime.tv_sec = le64_to_cpu(fe->i_ctime);
	inode->i_ctime.tv_nsec = le32_to_cpu(fe->i_ctime_nsec);

	if (OCFS2_I(inode)->ip_blkno != le64_to_cpu(fe->i_blkno))
		mlog(ML_ERROR,
		     "ip_blkno %llu != i_blkno %llu!\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno,
		     (unsigned long long)le64_to_cpu(fe->i_blkno));

	set_nlink(inode, ocfs2_read_links_count(fe));

	trace_ocfs2_populate_inode(OCFS2_I(inode)->ip_blkno,
				   le32_to_cpu(fe->i_flags));
	if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) {
		OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SYSTEM_FILE;
		inode->i_flags |= S_NOQUOTA;
	}
  
	if (fe->i_flags & cpu_to_le32(OCFS2_LOCAL_ALLOC_FL)) {
		OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
	} else if (fe->i_flags & cpu_to_le32(OCFS2_BITMAP_FL)) {
		OCFS2_I(inode)->ip_flags |= OCFS2_INODE_BITMAP;
	} else if (fe->i_flags & cpu_to_le32(OCFS2_QUOTA_FL)) {
		inode->i_flags |= S_NOQUOTA;
	} else if (fe->i_flags & cpu_to_le32(OCFS2_SUPER_BLOCK_FL)) {
		/* we can't actually hit this as read_inode can't
		 * handle superblocks today ;-) */
		BUG();
	}

	switch (inode->i_mode & S_IFMT) {
	    case S_IFREG:
		    if (use_plocks)
			    inode->i_fop = &ocfs2_fops;
		    else
			    inode->i_fop = &ocfs2_fops_no_plocks;
		    inode->i_op = &ocfs2_file_iops;
		    i_size_write(inode, le64_to_cpu(fe->i_size));
		    break;
	    case S_IFDIR:
		    inode->i_op = &ocfs2_dir_iops;
		    if (use_plocks)
			    inode->i_fop = &ocfs2_dops;
		    else
			    inode->i_fop = &ocfs2_dops_no_plocks;
		    i_size_write(inode, le64_to_cpu(fe->i_size));
		    OCFS2_I(inode)->ip_dir_lock_gen = 1;
		    break;
	    case S_IFLNK:
		    inode->i_op = &ocfs2_symlink_inode_operations;
		    inode_nohighmem(inode);
		    i_size_write(inode, le64_to_cpu(fe->i_size));
		    break;
	    default:
		    inode->i_op = &ocfs2_special_file_iops;
		    init_special_inode(inode, inode->i_mode,
				       inode->i_rdev);
		    break;
	}

	if (create_ino) {
		inode->i_ino = ino_from_blkno(inode->i_sb,
			       le64_to_cpu(fe->i_blkno));

		/*
		 * If we ever want to create system files from kernel,
		 * the generation argument to
		 * ocfs2_inode_lock_res_init() will have to change.
		 */
		BUG_ON(le32_to_cpu(fe->i_flags) & OCFS2_SYSTEM_FL);

		ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_inode_lockres,
					  OCFS2_LOCK_TYPE_META, 0, inode);

		ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_open_lockres,
					  OCFS2_LOCK_TYPE_OPEN, 0, inode);
	}

	ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_rw_lockres,
				  OCFS2_LOCK_TYPE_RW, inode->i_generation,
				  inode);

	ocfs2_set_inode_flags(inode);

	OCFS2_I(inode)->ip_last_used_slot = 0;
	OCFS2_I(inode)->ip_last_used_group = 0;

	if (S_ISDIR(inode->i_mode))
		ocfs2_resv_set_type(&OCFS2_I(inode)->ip_la_data_resv,
				    OCFS2_RESV_FLAG_DIR);
}
/*
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
	Function	:me2fsGetVfsInode
	Input		:struct super_block *sb
				 < vfs super block >
				 unsigned int ino
				 < inode number for allocating new inode >
	Output		:void
	Return		:struct inode *inode
				 < vfs inode >

	Description	:allocate me2fs inode info, get a locked vfs inode, read the
				 ext2 inode from disk and fill in both
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
*/
struct inode *me2fsGetVfsInode( struct super_block *sb, unsigned int ino )
{
	struct inode			*inode;
	struct me2fs_inode_info	*mei;
	struct ext2_inode		*ext2_inode;
	struct buffer_head		*bh;
	uid_t					i_uid;
	gid_t					i_gid;
	int						i;

	/* ------------------------------------------------------------------------ */
	/* find an inode cache or allocate inode and lock it						*/
	/* ------------------------------------------------------------------------ */
	inode = iget_locked( sb, ino );

	if( !inode )
	{
		return( ERR_PTR( -ENOMEM ) );
	}

	/* ------------------------------------------------------------------------ */
	/* get an inode which has already existed									*/
	/* ------------------------------------------------------------------------ */
	if( !( inode->i_state & I_NEW ) )
	{
		return( inode );
	}
	
	/* ------------------------------------------------------------------------ */
	/* read ext2 inode															*/
	/* ------------------------------------------------------------------------ */
	mei = ME2FS_I( inode );

	ext2_inode = me2fsGetExt2Inode( inode->i_sb, ino, &bh );

	if( IS_ERR( ext2_inode ) )
	{
		iget_failed( inode );
		return( ( struct inode* )( ext2_inode ) );
	}

	dbgPrintExt2InodeInfo( ext2_inode );

	/* ------------------------------------------------------------------------ */
	/* set up vfs inode															*/
	/* ------------------------------------------------------------------------ */
	inode->i_mode	= le16_to_cpu( ext2_inode->i_mode );
	i_uid			= ( uid_t )le16_to_cpu( ext2_inode->i_uid );	// low bytes
	i_gid			= ( gid_t )le16_to_cpu( ext2_inode->i_gid );	// low bytes
	if( ME2FS_SB( sb )->s_mount_opt & EXT2_MOUNT_NO_UID32 )
	{
		/* do nothing															*/
	}
	else
	{
		i_uid |= le16_to_cpu( ext2_inode->osd2.linux2.l_i_uid_high ) << 16;
		i_gid |= le16_to_cpu( ext2_inode->osd2.linux2.l_i_gid_high ) << 16;
	}
	i_uid_write( inode, i_uid );
	i_gid_write( inode, i_gid );
	set_nlink( inode, le16_to_cpu( ext2_inode->i_links_count ) );
	inode->i_size	= le32_to_cpu( ext2_inode->i_size );
	inode->i_atime.tv_sec	= ( signed int )le32_to_cpu( ext2_inode->i_atime );
	inode->i_ctime.tv_sec	= ( signed int )le32_to_cpu( ext2_inode->i_ctime );
	inode->i_mtime.tv_sec	= ( signed int )le32_to_cpu( ext2_inode->i_mtime );
	inode->i_atime.tv_nsec	= 0;
	inode->i_ctime.tv_nsec	= 0;
	inode->i_mtime.tv_nsec	= 0;
	mei->i_dtime			= le32_to_cpu( ext2_inode->i_dtime );

	if( ( inode->i_nlink == 0 ) &&
		( ( inode->i_mode == 0 ) || ( mei->i_dtime ) ) )
	{
		brelse( bh );
		iget_failed( inode );
		return( ERR_PTR( -ESTALE ) );
	}

	inode->i_blocks			= le32_to_cpu( ext2_inode->i_blocks );
	mei->i_flags			= le32_to_cpu( ext2_inode->i_flags );
	mei->i_faddr			= le32_to_cpu( ext2_inode->i_faddr );
	mei->i_frag_no			= ext2_inode->osd2.linux2.l_i_frag;
	mei->i_frag_size		= ext2_inode->osd2.linux2.l_i_fsize;
	mei->i_file_acl			= le32_to_cpu( ext2_inode->i_file_acl );
	mei->i_dir_acl			= 0;
	if( S_ISREG( inode->i_mode ) )
	{
		/* use i_dir_acl as the upper 32 bits of the file size					*/
		inode->i_size		|= ( ( __u64 )le32_to_cpu( ext2_inode->i_dir_acl ) )
							   << 32;
	}
	else
	{
		mei->i_dir_acl		= le32_to_cpu( ext2_inode->i_dir_acl );
	}
	mei->i_dtime			= 0;
	inode->i_generation		= le32_to_cpu( ext2_inode->i_generation );
	mei->i_state			= 0;
	mei->i_block_group		= ( ino - 1 ) / ME2FS_SB( sb )->s_inodes_per_group;
	mei->i_dir_start_lookup	= 0;

	for( i = 0 ; i < ME2FS_NR_BLOCKS ; i++ )
	{
		mei->i_data[ i ] = ext2_inode->i_block[ i ];
	}

	if( S_ISREG( inode->i_mode ) )
	{
		/* regular file operations are not implemented yet					*/
	}
	else if( S_ISDIR( inode->i_mode ) )
	{
		DBGPRINT( "<ME2FS>get directory inode!\n" );
		inode->i_fop = &me2fs_dir_operations;
		inode->i_op = &me2fs_dir_inode_operations;
	}
	else if( S_ISLNK( inode->i_mode ) )
	{
		/* symbolic link operations are not implemented yet					*/
	}
	else
	{
		/* special files are not handled yet								*/
	}

	brelse( bh );
	me2fsSetVfsInodeFlags( inode );

	dbgPrintMe2fsInodeInfo( mei );
	dbgPrintVfsInode( inode );

	unlock_new_inode( inode );

	return( inode );
}
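Two on-disk ext2 details carried over by me2fsGetVfsInode() are easy to miss: the 32-bit uid/gid are split into low and high 16-bit halves (unless the NO_UID32 mount option is set), and for regular files i_dir_acl doubles as the upper 32 bits of the file size. A stand-alone sketch of that reconstruction, using a hypothetical reduced struct and assuming a little-endian host so the le*_to_cpu conversions can be dropped:

#include <stdint.h>
#include <stdio.h>

/* Reduced, hypothetical subset of the on-disk ext2 inode, just enough to
 * show how the uid and the file size are reassembled above. */
struct mini_ext2_inode {
	uint16_t i_uid;		/* low 16 bits of owner uid */
	uint16_t l_i_uid_high;	/* high 16 bits of owner uid */
	uint32_t i_size;	/* low 32 bits of file size */
	uint32_t i_dir_acl;	/* high 32 bits of size for regular files */
};

int main(void)
{
	struct mini_ext2_inode raw = {
		.i_uid = 1000, .l_i_uid_high = 1,
		.i_size = 0x200, .i_dir_acl = 0x1,
	};
	uint32_t uid = raw.i_uid | ((uint32_t)raw.l_i_uid_high << 16);
	uint64_t size = raw.i_size | ((uint64_t)raw.i_dir_acl << 32);

	printf("uid  = %u\n", (unsigned)uid);			/* 66536 */
	printf("size = %llu\n", (unsigned long long)size);	/* 4294967808 */
	return 0;
}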
static ssize_t stage1_store(struct kobject *kobj, struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	char cur_name[DEVNAME_SIZE] = "";
	int test_len, cur_len;
	int i, j;
	int id;
	int select;
	struct msdc_host *host;
	int ret;
	/* char *p_log; */
	/* id = 3; */
	select = -1;
	/* sscanf(kobj->name, "%d", &id); */
	kstrtoint(kobj->name, 0, &id);

	if (id >= HOST_MAX_NUM) {
		pr_err("[%s] id<%d> is bigger than HOST_MAX_NUM<%d>\n", __func__, id, HOST_MAX_NUM);
		return count;
	}

	host = mtk_msdc_host[id];
	/* sscanf(attr->attr.name, "%s", cur_name); */
	strncpy(cur_name, attr->attr.name, DEVNAME_SIZE);

	for (i = 0; i < TOTAL_STAGE1_NODE_COUNT; i++) {
		test_len = strlen(stage1_nodes[i]);
		cur_len = strlen(cur_name);
		if ((test_len == cur_len) && (strncmp(stage1_nodes[i], cur_name, cur_len) == 0)) {
			select = i;
			break;
		}
	}

	switch (select) {
	case VOLTAGE:
		/* sscanf(buf, "%u", &cur_voltage[id]); */
		kstrtou32(buf, 0, &cur_voltage[id]);
		break;
	case PARAMS:
		memset(cur_name, 0, DEVNAME_SIZE);
		cur_name[0] = 1;
		cur_name[1] = E_AUTOK_PARM_MAX;
		memcpy(&cur_name[2], &cur_voltage[id], sizeof(unsigned int));
		store_autok(&p_single_autok[id], cur_name, count);

		pr_debug("[AUTOKD] Enter Store Autok");
		pr_debug("[AUTOKD] p_single_autok[%d].vol_count=%d", id,
		       p_single_autok[id].vol_count);
		pr_debug("[AUTOKD] p_single_autok[%d].param_count=%d", id,
		       p_single_autok[id].param_count);
		for (i = 0; i < p_single_autok[id].vol_count; i++) {
			pr_debug("[AUTOKD] p_single_autok[%d].vol_list[%d]=%d", id, i,
			       p_single_autok[id].vol_list[i]);
		}
		for (i = 0; i < p_single_autok[id].vol_count; i++) {
			for (j = 0; j < p_single_autok[id].param_count; j++)
				pr_debug("[AUTOKD] p_single_autok[%d].ai_data[%d][%d]=%d", id, i, j,
				       p_single_autok[id].ai_data[i][j].data.sel);
		}
		/* [FIXDONE] Start to do autok algorithm; data is in p_single_autok */
#ifdef UT_TEST
#error "remove me"
		if (is_first_stage1 == 1) {
			/* claim host */
#ifdef CONFIG_SDIOAUTOK_SUPPORT
			/* mt_cpufreq_disable(0, true); */
			/* FIXME@CCJ mt_vcore_dvfs_disable_by_sdio(0, true); */
#endif
#ifdef MTK_SDIO30_ONLINE_TUNING_SUPPORT
			atomic_set(&host->ot_work.ot_disable, 1);
#endif				/* MTK_SDIO30_ONLINE_TUNING_SUPPORT */
			autok_claim_host(host);

			is_first_stage1 = 0;
		}
#endif
#ifdef AUTOK_THREAD
		p_autok_thread_data->host = host;
		p_autok_thread_data->stage = 1;
		p_autok_thread_data->p_autok_predata = &p_single_autok[id];
		p_autok_thread_data->log = autok_log_info;
		task = kthread_run(&autok_thread_func, (void *)(p_autok_thread_data), "autokp");
#endif
		break;
	case DONE:
		/* sscanf(buf, "%d", &i); */
		kstrtoint(buf, 0, &i);
		p_autok_thread_data->is_autok_done[id] = (u8) i;
		break;
	case LOG:
		/* sscanf(buf, "%d", &i); */
		kstrtoint(buf, 0, &i);
		if (is_full_log != i) {
			is_full_log = i;
			if (i == 0) {
				debugfs_remove(autok_log_entry);
				/* kfree(autok_log_info); */
			} else {
				autok_log_entry =
				    debugfs_create_file("autok_log", 0660, NULL, NULL,
							&autok_log_fops);
				i_gid_write(autok_log_entry->d_inode, 1000);
				autok_log_info = kzalloc(LOG_SIZE, GFP_KERNEL);
				total_msg_size = 0;	/* rewind the counter to 0 when allocate new buffer */
			}
		}
		break;
	default:
		break;
	}
	return count;
}
Example #6
static int jffs2_do_setattr (struct _inode *inode, struct iattr *iattr)
{
    struct jffs2_full_dnode *old_metadata, *new_metadata;
    struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
    struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
    struct jffs2_raw_inode *ri;
    unsigned char *mdata = NULL;
    int mdatalen = 0;
    unsigned int ivalid;
    uint32_t alloclen;
    int ret;
    int alloc_type = ALLOC_NORMAL;

    jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);

    /* Special cases - we don't want more than one data node
       for these types on the medium at any time. So setattr
       must read the original data associated with the node
       (i.e. the device numbers or the target name) and write
       it out again with the appropriate data attached */
    if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
        return -EIO;
    } else if (S_ISLNK(inode->i_mode)) {
        mutex_lock(&f->sem);
        mdatalen = f->metadata->size;
        mdata = kmalloc(f->metadata->size, GFP_USER);
        if (!mdata) {
            mutex_unlock(&f->sem);
            return -ENOMEM;
        }
        ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
        if (ret) {
            mutex_unlock(&f->sem);
            kfree(mdata);
            return ret;
        }
        mutex_unlock(&f->sem);
        jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
                  __func__, mdatalen);
    }

    ri = jffs2_alloc_raw_inode();
    if (!ri) {
        if (S_ISLNK(inode->i_mode))
            kfree(mdata);
        return -ENOMEM;
    }

    ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
                              ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
    if (ret) {
        jffs2_free_raw_inode(ri);
        if (S_ISLNK(inode->i_mode))
            kfree(mdata);
        return ret;
    }
    mutex_lock(&f->sem);
    ivalid = iattr->ia_valid;

    ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
    ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
    ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
    ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

    ri->ino = cpu_to_je32(inode->i_ino);
    ri->version = cpu_to_je32(++f->highest_version);

    ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
                          from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
    ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
                          from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));

    if (ivalid & ATTR_MODE)
        ri->mode = cpu_to_jemode(iattr->ia_mode);
    else
        ri->mode = cpu_to_jemode(inode->i_mode);


    ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
    ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
    ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
    ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

    ri->offset = cpu_to_je32(0);
    ri->csize = ri->dsize = cpu_to_je32(mdatalen);
    ri->compr = JFFS2_COMPR_NONE;
    if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
        /* It's an extension. Make it a hole node */
        ri->compr = JFFS2_COMPR_ZERO;
        ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
        ri->offset = cpu_to_je32(inode->i_size);
    } else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
        /* For truncate-to-zero, treat it as deletion because
           it'll always be obsoleting all previous nodes */
        alloc_type = ALLOC_DELETION;
    }
    ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
    if (mdatalen)
        ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
    else
        ri->data_crc = cpu_to_je32(0);

    new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
    if (S_ISLNK(inode->i_mode))
        kfree(mdata);

    if (IS_ERR(new_metadata)) {
        jffs2_complete_reservation(c);
        jffs2_free_raw_inode(ri);
        mutex_unlock(&f->sem);
        return PTR_ERR(new_metadata);
    }
    /* It worked. Update the inode */
    inode->i_atime = ITIME(je32_to_cpu(ri->atime));
    inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
    inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
    inode->i_mode = jemode_to_cpu(ri->mode);
    i_uid_write(inode, je16_to_cpu(ri->uid));
    i_gid_write(inode, je16_to_cpu(ri->gid));


    old_metadata = f->metadata;

    if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
        jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

    if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
        jffs2_add_full_dnode_to_inode(c, f, new_metadata);
        inode->i_size = iattr->ia_size;
        f->metadata = NULL;
    } else {
        f->metadata = new_metadata;
    }
    if (old_metadata) {
        jffs2_mark_node_obsolete(c, old_metadata->raw);
        jffs2_free_full_dnode(old_metadata);
    }
    jffs2_free_raw_inode(ri);

    mutex_unlock(&f->sem);
    jffs2_complete_reservation(c);

    /* We have to do the truncate_setsize() without f->sem held, since
       some pages may be locked and waiting for it in readpage().
       We are protected from a simultaneous write() extending i_size
       back past iattr->ia_size, because do_truncate() holds the
       generic inode semaphore. */
    if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
        truncate_setsize(inode, iattr->ia_size);
    }

    return 0;
}
Example #7
struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
{
	struct bfs_inode *di;
	struct inode *inode;
	struct buffer_head *bh;
	int block, off;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(inode->i_sb)->si_lasti)) {
		printf("Bad inode number %s:%08lx\n", inode->i_sb->s_id, ino);
		goto error;
	}

	block = (ino - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1;
	bh = sb_bread(inode->i_sb, block);
	if (!bh) {
		printf("Unable to read inode %s:%08lx\n", inode->i_sb->s_id,
									ino);
		goto error;
	}

	off = (ino - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
	di = (struct bfs_inode *)bh->b_data + off;

	inode->i_mode = 0x0000FFFF & le32_to_cpu(di->i_mode);
	if (le32_to_cpu(di->i_vtype) == BFS_VDIR) {
		inode->i_mode |= S_IFDIR;
		inode->i_op = &bfs_dir_inops;
		inode->i_fop = &bfs_dir_operations;
	} else if (le32_to_cpu(di->i_vtype) == BFS_VREG) {
		inode->i_mode |= S_IFREG;
		inode->i_op = &bfs_file_inops;
		inode->i_fop = &bfs_file_operations;
		inode->i_mapping->a_ops = &bfs_aops;
	}

	BFS_I(inode)->i_sblock =  le32_to_cpu(di->i_sblock);
	BFS_I(inode)->i_eblock =  le32_to_cpu(di->i_eblock);
	BFS_I(inode)->i_dsk_ino = le16_to_cpu(di->i_ino);
	i_uid_write(inode, le32_to_cpu(di->i_uid));
	i_gid_write(inode,  le32_to_cpu(di->i_gid));
	set_nlink(inode, le32_to_cpu(di->i_nlink));
	inode->i_size = BFS_FILESIZE(di);
	inode->i_blocks = BFS_FILEBLOCKS(di);
	inode->i_atime.tv_sec =  le32_to_cpu(di->i_atime);
	inode->i_mtime.tv_sec =  le32_to_cpu(di->i_mtime);
	inode->i_ctime.tv_sec =  le32_to_cpu(di->i_ctime);
	inode->i_atime.tv_nsec = 0;
	inode->i_mtime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;

	brelse(bh);
	unlock_new_inode(inode);
	return inode;

error:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}
Example #8
File: inode.c Project: 020gzh/linux
void hpfs_read_inode(struct inode *i)
{
	struct buffer_head *bh;
	struct fnode *fnode;
	struct super_block *sb = i->i_sb;
	struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
	void *ea;
	int ea_size;

	if (!(fnode = hpfs_map_fnode(sb, i->i_ino, &bh))) {
		/*i->i_mode |= S_IFREG;
		i->i_mode &= ~0111;
		i->i_op = &hpfs_file_iops;
		i->i_fop = &hpfs_file_ops;
		clear_nlink(i);*/
		make_bad_inode(i);
		return;
	}
	if (hpfs_sb(i->i_sb)->sb_eas) {
		if ((ea = hpfs_get_ea(i->i_sb, fnode, "UID", &ea_size))) {
			if (ea_size == 2) {
				i_uid_write(i, le16_to_cpu(*(__le16*)ea));
				hpfs_inode->i_ea_uid = 1;
			}
			kfree(ea);
		}
		if ((ea = hpfs_get_ea(i->i_sb, fnode, "GID", &ea_size))) {
			if (ea_size == 2) {
				i_gid_write(i, le16_to_cpu(*(__le16*)ea));
				hpfs_inode->i_ea_gid = 1;
			}
			kfree(ea);
		}
		if ((ea = hpfs_get_ea(i->i_sb, fnode, "SYMLINK", &ea_size))) {
			kfree(ea);
			i->i_mode = S_IFLNK | 0777;
			i->i_op = &page_symlink_inode_operations;
			inode_nohighmem(i);
			i->i_data.a_ops = &hpfs_symlink_aops;
			set_nlink(i, 1);
			i->i_size = ea_size;
			i->i_blocks = 1;
			brelse(bh);
			return;
		}
		if ((ea = hpfs_get_ea(i->i_sb, fnode, "MODE", &ea_size))) {
			int rdev = 0;
			umode_t mode = hpfs_sb(sb)->sb_mode;
			if (ea_size == 2) {
				mode = le16_to_cpu(*(__le16*)ea);
				hpfs_inode->i_ea_mode = 1;
			}
			kfree(ea);
			i->i_mode = mode;
			if (S_ISBLK(mode) || S_ISCHR(mode)) {
				if ((ea = hpfs_get_ea(i->i_sb, fnode, "DEV", &ea_size))) {
					if (ea_size == 4)
						rdev = le32_to_cpu(*(__le32*)ea);
					kfree(ea);
				}
			}
			if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
				brelse(bh);
				set_nlink(i, 1);
				i->i_size = 0;
				i->i_blocks = 1;
				init_special_inode(i, mode,
					new_decode_dev(rdev));
				return;
			}
		}
	}
	if (fnode_is_dir(fnode)) {
		int n_dnodes, n_subdirs;
		i->i_mode |= S_IFDIR;
		i->i_op = &hpfs_dir_iops;
		i->i_fop = &hpfs_dir_ops;
		hpfs_inode->i_parent_dir = le32_to_cpu(fnode->up);
		hpfs_inode->i_dno = le32_to_cpu(fnode->u.external[0].disk_secno);
		if (hpfs_sb(sb)->sb_chk >= 2) {
			struct buffer_head *bh0;
			if (hpfs_map_fnode(sb, hpfs_inode->i_parent_dir, &bh0)) brelse(bh0);
		}
		n_dnodes = 0; n_subdirs = 0;
		hpfs_count_dnodes(i->i_sb, hpfs_inode->i_dno, &n_dnodes, &n_subdirs, NULL);
		i->i_blocks = 4 * n_dnodes;
		i->i_size = 2048 * n_dnodes;
		set_nlink(i, 2 + n_subdirs);
	} else {
		i->i_mode |= S_IFREG;
		if (!hpfs_inode->i_ea_mode) i->i_mode &= ~0111;
		i->i_op = &hpfs_file_iops;
		i->i_fop = &hpfs_file_ops;
		set_nlink(i, 1);
		i->i_size = le32_to_cpu(fnode->file_size);
		i->i_blocks = ((i->i_size + 511) >> 9) + 1;
		i->i_data.a_ops = &hpfs_aops;
		hpfs_i(i)->mmu_private = i->i_size;
	}
	brelse(bh);
}
Example #9
/**
 * Swap the information from the given @inode and the inode
 * EXT4_BOOT_LOADER_INO. It will basically swap i_data and all other
 * important fields of the inodes.
 *
 * @sb:         the super block of the filesystem
 * @inode:      the inode to swap with EXT4_BOOT_LOADER_INO
 *
 */
static long swap_inode_boot_loader(struct super_block *sb,
				struct inode *inode)
{
	handle_t *handle;
	int err;
	struct inode *inode_bl;
	struct ext4_inode_info *ei_bl;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode))
		return -EINVAL;

	if (!inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
		return -EPERM;

	inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
	if (IS_ERR(inode_bl))
		return PTR_ERR(inode_bl);
	ei_bl = EXT4_I(inode_bl);

	filemap_flush(inode->i_mapping);
	filemap_flush(inode_bl->i_mapping);

	/* Protect orig inodes against a truncate and make sure,
	 * that only 1 swap_inode_boot_loader is running. */
	lock_two_nondirectories(inode, inode_bl);

	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(&inode_bl->i_data, 0);

	/* Wait for all existing dio workers */
	ext4_inode_block_unlocked_dio(inode);
	ext4_inode_block_unlocked_dio(inode_bl);
	inode_dio_wait(inode);
	inode_dio_wait(inode_bl);

	handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
	if (IS_ERR(handle)) {
		err = -EINVAL;
		goto journal_err_out;
	}

	/* Protect extent tree against block allocations via delalloc */
	ext4_double_down_write_data_sem(inode, inode_bl);

	if (inode_bl->i_nlink == 0) {
		/* this inode has never been used as a BOOT_LOADER */
		set_nlink(inode_bl, 1);
		i_uid_write(inode_bl, 0);
		i_gid_write(inode_bl, 0);
		inode_bl->i_flags = 0;
		ei_bl->i_flags = 0;
		inode_bl->i_version = 1;
		i_size_write(inode_bl, 0);
		inode_bl->i_mode = S_IFREG;
		if (ext4_has_feature_extents(sb)) {
			ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode_bl);
		} else
			memset(ei_bl->i_data, 0, sizeof(ei_bl->i_data));
	}

	swap_inode_data(inode, inode_bl);

	inode->i_ctime = inode_bl->i_ctime = ext4_current_time(inode);

	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	inode_bl->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ext4_discard_preallocations(inode);

	err = ext4_mark_inode_dirty(handle, inode);
	if (err < 0) {
		ext4_warning(inode->i_sb,
			"couldn't mark inode #%lu dirty (err %d)",
			inode->i_ino, err);
		/* Revert all changes: */
		swap_inode_data(inode, inode_bl);
	} else {
		err = ext4_mark_inode_dirty(handle, inode_bl);
		if (err < 0) {
			ext4_warning(inode_bl->i_sb,
				"couldn't mark inode #%lu dirty (err %d)",
				inode_bl->i_ino, err);
			/* Revert all changes: */
			swap_inode_data(inode, inode_bl);
			ext4_mark_inode_dirty(handle, inode);
		}
	}
	ext4_journal_stop(handle);
	ext4_double_up_write_data_sem(inode, inode_bl);

journal_err_out:
	ext4_inode_resume_unlocked_dio(inode);
	ext4_inode_resume_unlocked_dio(inode_bl);
	unlock_two_nondirectories(inode, inode_bl);
	iput(inode_bl);
	return err;
}
Example #10
File: rock.c Project: 7799/linux
static int
parse_rock_ridge_inode_internal(struct iso_directory_record *de,
				struct inode *inode, int regard_xa)
{
	int symlink_len = 0;
	int cnt, sig;
	struct inode *reloc;
	struct rock_ridge *rr;
	int rootflag;
	struct rock_state rs;
	int ret = 0;

	if (!ISOFS_SB(inode->i_sb)->s_rock)
		return 0;

	init_rock_state(&rs, inode);
	setup_rock_ridge(de, inode, &rs);
	if (regard_xa) {
		rs.chr += 14;
		rs.len -= 14;
		if (rs.len < 0)
			rs.len = 0;
	}

repeat:
	while (rs.len > 2) { /* There may be one byte for padding somewhere */
		rr = (struct rock_ridge *)rs.chr;
		/*
		 * Ignore rock ridge info if rr->len is out of range, but
		 * don't return -EIO because that would make the file
		 * invisible.
		 */
		if (rr->len < 3)
			goto out;	/* Something got screwed up here */
		sig = isonum_721(rs.chr);
		if (rock_check_overflow(&rs, sig))
			goto eio;
		rs.chr += rr->len;
		rs.len -= rr->len;
		/*
		 * As above, just ignore the rock ridge info if rr->len
		 * is bogus.
		 */
		if (rs.len < 0)
			goto out;	/* Something got screwed up here */

		switch (sig) {
#ifndef CONFIG_ZISOFS		/* No flag for SF or ZF */
		case SIG('R', 'R'):
			if ((rr->u.RR.flags[0] &
			     (RR_PX | RR_TF | RR_SL | RR_CL)) == 0)
				goto out;
			break;
#endif
		case SIG('S', 'P'):
			if (check_sp(rr, inode))
				goto out;
			break;
		case SIG('C', 'E'):
			rs.cont_extent = isonum_733(rr->u.CE.extent);
			rs.cont_offset = isonum_733(rr->u.CE.offset);
			rs.cont_size = isonum_733(rr->u.CE.size);
			break;
		case SIG('E', 'R'):
			ISOFS_SB(inode->i_sb)->s_rock = 1;
			printk(KERN_DEBUG "ISO 9660 Extensions: ");
			{
				int p;
				for (p = 0; p < rr->u.ER.len_id; p++)
					printk("%c", rr->u.ER.data[p]);
			}
			printk("\n");
			break;
		case SIG('P', 'X'):
			inode->i_mode = isonum_733(rr->u.PX.mode);
			set_nlink(inode, isonum_733(rr->u.PX.n_links));
			i_uid_write(inode, isonum_733(rr->u.PX.uid));
			i_gid_write(inode, isonum_733(rr->u.PX.gid));
			break;
		case SIG('P', 'N'):
			{
				int high, low;
				high = isonum_733(rr->u.PN.dev_high);
				low = isonum_733(rr->u.PN.dev_low);
				/*
				 * The Rock Ridge standard specifies that if
				 * sizeof(dev_t) <= 4, then the high field is
				 * unused, and the device number is completely
				 * stored in the low field.  Some writers may
				 * ignore this subtlety,
				 * and as a result we test to see if the entire
				 * device number is
				 * stored in the low field, and use that.
				 */
				if ((low & ~0xff) && high == 0) {
					inode->i_rdev =
					    MKDEV(low >> 8, low & 0xff);
				} else {
					inode->i_rdev =
					    MKDEV(high, low);
				}
			}
			break;
		case SIG('T', 'F'):
			/*
			 * Some RRIP writers incorrectly place ctime in the
			 * TF_CREATE field. Try to handle this correctly for
			 * either case.
			 */
			/* Rock ridge never appears on a High Sierra disk */
			cnt = 0;
			if (rr->u.TF.flags & TF_CREATE) {
				inode->i_ctime.tv_sec =
				    iso_date(rr->u.TF.times[cnt++].time,
					     0);
				inode->i_ctime.tv_nsec = 0;
			}
			if (rr->u.TF.flags & TF_MODIFY) {
				inode->i_mtime.tv_sec =
				    iso_date(rr->u.TF.times[cnt++].time,
					     0);
				inode->i_mtime.tv_nsec = 0;
			}
			if (rr->u.TF.flags & TF_ACCESS) {
				inode->i_atime.tv_sec =
				    iso_date(rr->u.TF.times[cnt++].time,
					     0);
				inode->i_atime.tv_nsec = 0;
			}
			if (rr->u.TF.flags & TF_ATTRIBUTES) {
				inode->i_ctime.tv_sec =
				    iso_date(rr->u.TF.times[cnt++].time,
					     0);
				inode->i_ctime.tv_nsec = 0;
			}
			break;
		case SIG('S', 'L'):
			{
				int slen;
				struct SL_component *slp;
				struct SL_component *oldslp;
				slen = rr->len - 5;
				slp = &rr->u.SL.link;
				inode->i_size = symlink_len;
				while (slen > 1) {
					rootflag = 0;
					switch (slp->flags & ~1) {
					case 0:
						inode->i_size +=
						    slp->len;
						break;
					case 2:
						inode->i_size += 1;
						break;
					case 4:
						inode->i_size += 2;
						break;
					case 8:
						rootflag = 1;
						inode->i_size += 1;
						break;
					default:
						printk("Symlink component flag "
							"not implemented\n");
					}
					slen -= slp->len + 2;
					oldslp = slp;
					slp = (struct SL_component *)
						(((char *)slp) + slp->len + 2);

					if (slen < 2) {
						if (((rr->u.SL.
						      flags & 1) != 0)
						    &&
						    ((oldslp->
						      flags & 1) == 0))
							inode->i_size +=
							    1;
						break;
					}

					/*
					 * If this component record isn't
					 * continued, then append a '/'.
					 */
					if (!rootflag
					    && (oldslp->flags & 1) == 0)
						inode->i_size += 1;
				}
			}
			symlink_len = inode->i_size;
			break;
		case SIG('R', 'E'):
			printk(KERN_WARNING "Attempt to read inode for "
					"relocated directory\n");
			goto out;
		case SIG('C', 'L'):
			ISOFS_I(inode)->i_first_extent =
			    isonum_733(rr->u.CL.location);
			reloc =
			    isofs_iget(inode->i_sb,
				       ISOFS_I(inode)->i_first_extent,
				       0);
			if (IS_ERR(reloc)) {
				ret = PTR_ERR(reloc);
				goto out;
			}
			inode->i_mode = reloc->i_mode;
			set_nlink(inode, reloc->i_nlink);
			inode->i_uid = reloc->i_uid;
			inode->i_gid = reloc->i_gid;
			inode->i_rdev = reloc->i_rdev;
			inode->i_size = reloc->i_size;
			inode->i_blocks = reloc->i_blocks;
			inode->i_atime = reloc->i_atime;
			inode->i_ctime = reloc->i_ctime;
			inode->i_mtime = reloc->i_mtime;
			iput(reloc);
			break;
#ifdef CONFIG_ZISOFS
		case SIG('Z', 'F'): {
			int algo;

			if (ISOFS_SB(inode->i_sb)->s_nocompress)
				break;
			algo = isonum_721(rr->u.ZF.algorithm);
			if (algo == SIG('p', 'z')) {
				int block_shift =
					isonum_711(&rr->u.ZF.parms[1]);
				if (block_shift > 17) {
					printk(KERN_WARNING "isofs: "
						"Can't handle ZF block "
						"size of 2^%d\n",
						block_shift);
				} else {
					/*
					 * Note: we don't change
					 * i_blocks here
					 */
					ISOFS_I(inode)->i_file_format =
						isofs_file_compressed;
					/*
					 * Parameters to compression
					 * algorithm (header size,
					 * block size)
					 */
					ISOFS_I(inode)->i_format_parm[0] =
						isonum_711(&rr->u.ZF.parms[0]);
					ISOFS_I(inode)->i_format_parm[1] =
						isonum_711(&rr->u.ZF.parms[1]);
					inode->i_size =
					    isonum_733(rr->u.ZF.
						       real_size);
				}
			} else {
				printk(KERN_WARNING
				       "isofs: Unknown ZF compression "
						"algorithm: %c%c\n",
				       rr->u.ZF.algorithm[0],
				       rr->u.ZF.algorithm[1]);
			}
			break;
		}
#endif
		default:
			break;
		}
	}
	ret = rock_continue(&rs);
	if (ret == 0)
		goto repeat;
	if (ret == 1)
		ret = 0;
out:
	kfree(rs.buffer);
	return ret;
eio:
	ret = -EIO;
	goto out;
}
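The PN entry handling above works around writers that store the entire device number in dev_low even though a high field exists. A small stand-alone illustration of that fall-back (the helper name and the example major:minor values are only for demonstration):

#include <stdio.h>

/* Sketch of the PN fall-back: if dev_low carries more than a minor number
 * while dev_high is zero, the whole device number lives in dev_low and is
 * split there; otherwise the two fields are used as-is. */
static void rr_decode_dev(unsigned int high, unsigned int low,
			  unsigned int *major, unsigned int *minor)
{
	if ((low & ~0xffu) && high == 0) {
		*major = low >> 8;
		*minor = low & 0xff;
	} else {
		*major = high;
		*minor = low;
	}
}

int main(void)
{
	unsigned int maj, min;

	rr_decode_dev(0, 0x0803, &maj, &min);	/* whole number in dev_low */
	printf("%u:%u\n", maj, min);		/* 8:3 */
	rr_decode_dev(8, 3, &maj, &min);	/* split across both fields */
	printf("%u:%u\n", maj, min);		/* 8:3 */
	return 0;
}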
Example #11
File: inode.c Project: Lyude/linux
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	if (f2fs_init_extent_tree(inode, &ri->i_ext))
		set_page_dirty(node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled. In order
		 * to keep inline_dentry's structure for backward compatibility,
		 * we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EINVAL;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi->sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi->sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	return 0;
}
Example #12
/**
 * Swap the information from the given @inode and the inode
 * EXT4_BOOT_LOADER_INO. It will basically swap i_data and all other
 * important fields of the inodes.
 *
 * @sb:         the super block of the filesystem
 * @inode:      the inode to swap with EXT4_BOOT_LOADER_INO
 *
 */
static long swap_inode_boot_loader(struct super_block *sb,
				struct inode *inode)
{
	handle_t *handle;
	int err;
	struct inode *inode_bl;
	struct ext4_inode_info *ei_bl;
	qsize_t size, size_bl, diff;
	blkcnt_t blocks;
	unsigned short bytes;

	inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
	if (IS_ERR(inode_bl))
		return PTR_ERR(inode_bl);
	ei_bl = EXT4_I(inode_bl);

	/* Protect orig inodes against a truncate and make sure,
	 * that only 1 swap_inode_boot_loader is running. */
	lock_two_nondirectories(inode, inode_bl);

	if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
	    IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
	    (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) ||
	    ext4_has_inline_data(inode)) {
		err = -EINVAL;
		goto journal_err_out;
	}

	if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
	    !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN)) {
		err = -EPERM;
		goto journal_err_out;
	}

	down_write(&EXT4_I(inode)->i_mmap_sem);
	err = filemap_write_and_wait(inode->i_mapping);
	if (err)
		goto err_out;

	err = filemap_write_and_wait(inode_bl->i_mapping);
	if (err)
		goto err_out;

	/* Wait for all existing dio workers */
	inode_dio_wait(inode);
	inode_dio_wait(inode_bl);

	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(&inode_bl->i_data, 0);

	handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
	if (IS_ERR(handle)) {
		err = -EINVAL;
		goto err_out;
	}

	/* Protect extent tree against block allocations via delalloc */
	ext4_double_down_write_data_sem(inode, inode_bl);

	if (inode_bl->i_nlink == 0) {
		/* this inode has never been used as a BOOT_LOADER */
		set_nlink(inode_bl, 1);
		i_uid_write(inode_bl, 0);
		i_gid_write(inode_bl, 0);
		inode_bl->i_flags = 0;
		ei_bl->i_flags = 0;
		inode_set_iversion(inode_bl, 1);
		i_size_write(inode_bl, 0);
		inode_bl->i_mode = S_IFREG;
		if (ext4_has_feature_extents(sb)) {
			ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS);
			ext4_ext_tree_init(handle, inode_bl);
		} else
			memset(ei_bl->i_data, 0, sizeof(ei_bl->i_data));
	}

	err = dquot_initialize(inode);
	if (err)
		goto err_out1;

	size = (qsize_t)(inode->i_blocks) * (1 << 9) + inode->i_bytes;
	size_bl = (qsize_t)(inode_bl->i_blocks) * (1 << 9) + inode_bl->i_bytes;
	diff = size - size_bl;
	swap_inode_data(inode, inode_bl);

	inode->i_ctime = inode_bl->i_ctime = current_time(inode);

	inode->i_generation = prandom_u32();
	inode_bl->i_generation = prandom_u32();
	reset_inode_seed(inode);
	reset_inode_seed(inode_bl);

	ext4_discard_preallocations(inode);

	err = ext4_mark_inode_dirty(handle, inode);
	if (err < 0) {
		/* No need to update quota information. */
		ext4_warning(inode->i_sb,
			"couldn't mark inode #%lu dirty (err %d)",
			inode->i_ino, err);
		/* Revert all changes: */
		swap_inode_data(inode, inode_bl);
		ext4_mark_inode_dirty(handle, inode);
		goto err_out1;
	}

	blocks = inode_bl->i_blocks;
	bytes = inode_bl->i_bytes;
	inode_bl->i_blocks = inode->i_blocks;
	inode_bl->i_bytes = inode->i_bytes;
	err = ext4_mark_inode_dirty(handle, inode_bl);
	if (err < 0) {
		/* No need to update quota information. */
		ext4_warning(inode_bl->i_sb,
			"couldn't mark inode #%lu dirty (err %d)",
			inode_bl->i_ino, err);
		goto revert;
	}

	/* Bootloader inode should not be counted into quota information. */
	if (diff > 0)
		dquot_free_space(inode, diff);
	else
		err = dquot_alloc_space(inode, -1 * diff);

	if (err < 0) {
revert:
		/* Revert all changes: */
		inode_bl->i_blocks = blocks;
		inode_bl->i_bytes = bytes;
		swap_inode_data(inode, inode_bl);
		ext4_mark_inode_dirty(handle, inode);
		ext4_mark_inode_dirty(handle, inode_bl);
	}

err_out1:
	ext4_journal_stop(handle);
	ext4_double_up_write_data_sem(inode, inode_bl);

err_out:
	up_write(&EXT4_I(inode)->i_mmap_sem);
journal_err_out:
	unlock_two_nondirectories(inode, inode_bl);
	iput(inode_bl);
	return err;
}
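The quota adjustment in Example #12 charges or releases the byte difference between the two inodes after the swap: an inode's accounted space is i_blocks 512-byte sectors plus the sub-sector remainder in i_bytes, and the bootloader inode is kept out of quota. The same arithmetic as a stand-alone sketch with made-up values:

#include <stdint.h>
#include <stdio.h>

/* The quota transfer reduced to plain arithmetic: compute each inode's
 * byte size, then free or charge the difference on the regular inode. */
int main(void)
{
	uint64_t blocks = 24, bytes = 100;	/* the regular inode */
	uint64_t blocks_bl = 8, bytes_bl = 0;	/* the boot-loader inode */
	int64_t size = (int64_t)(blocks << 9) + (int64_t)bytes;
	int64_t size_bl = (int64_t)(blocks_bl << 9) + (int64_t)bytes_bl;
	int64_t diff = size - size_bl;

	if (diff > 0)
		printf("free %lld bytes from the inode's quota\n",
		       (long long)diff);
	else
		printf("charge %lld more bytes to the inode's quota\n",
		       (long long)-diff);
	return 0;
}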
Example #13
struct inode *efs_iget(struct super_block *super, unsigned long ino)
{
	int i, inode_index;
	dev_t device;
	u32 rdev;
	struct buffer_head *bh;
	struct efs_sb_info    *sb = SUPER_INFO(super);
	struct efs_inode_info *in;
	efs_block_t block, offset;
	struct efs_dinode *efs_inode;
	struct inode *inode;

	inode = iget_locked(super, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	in = INODE_INFO(inode);

	/*
	** EFS layout:
	**
	** |   cylinder group    |   cylinder group    |   cylinder group ..etc
	** |inodes|data          |inodes|data          |inodes|data       ..etc
	**
	** work out the inode block index, (considering initially that the
	** inodes are stored as consecutive blocks). then work out the block
	** number of that inode given the above layout, and finally the
	** offset of the inode within that block.
	*/

	inode_index = inode->i_ino /
		(EFS_BLOCKSIZE / sizeof(struct efs_dinode));

	block = sb->fs_start + sb->first_block + 
		(sb->group_size * (inode_index / sb->inode_blocks)) +
		(inode_index % sb->inode_blocks);

	offset = (inode->i_ino %
			(EFS_BLOCKSIZE / sizeof(struct efs_dinode))) *
		sizeof(struct efs_dinode);

	bh = sb_bread(inode->i_sb, block);
	if (!bh) {
		pr_warn("%s() failed at block %d\n", __func__, block);
		goto read_inode_error;
	}

	efs_inode = (struct efs_dinode *) (bh->b_data + offset);
    
	inode->i_mode  = be16_to_cpu(efs_inode->di_mode);
	set_nlink(inode, be16_to_cpu(efs_inode->di_nlink));
	i_uid_write(inode, (uid_t)be16_to_cpu(efs_inode->di_uid));
	i_gid_write(inode, (gid_t)be16_to_cpu(efs_inode->di_gid));
	inode->i_size  = be32_to_cpu(efs_inode->di_size);
	inode->i_atime.tv_sec = be32_to_cpu(efs_inode->di_atime);
	inode->i_mtime.tv_sec = be32_to_cpu(efs_inode->di_mtime);
	inode->i_ctime.tv_sec = be32_to_cpu(efs_inode->di_ctime);
	inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;

	/* this is the number of blocks in the file */
	if (inode->i_size == 0) {
		inode->i_blocks = 0;
	} else {
		inode->i_blocks = ((inode->i_size - 1) >> EFS_BLOCKSIZE_BITS) + 1;
	}

	rdev = be16_to_cpu(efs_inode->di_u.di_dev.odev);
	if (rdev == 0xffff) {
		rdev = be32_to_cpu(efs_inode->di_u.di_dev.ndev);
		if (sysv_major(rdev) > 0xfff)
			device = 0;
		else
			device = MKDEV(sysv_major(rdev), sysv_minor(rdev));
	} else
		device = old_decode_dev(rdev);

	/* get the number of extents for this object */
	in->numextents = be16_to_cpu(efs_inode->di_numextents);
	in->lastextent = 0;

	/* copy the extents contained within the inode to memory */
	for(i = 0; i < EFS_DIRECTEXTENTS; i++) {
		extent_copy(&(efs_inode->di_u.di_extents[i]), &(in->extents[i]));
		if (i < in->numextents && in->extents[i].cooked.ex_magic != 0) {
			pr_warn("extent %d has bad magic number in inode %lu\n",
				i, inode->i_ino);
			brelse(bh);
			goto read_inode_error;
		}
	}

	brelse(bh);
	pr_debug("efs_iget(): inode %lu, extents %d, mode %o\n",
		 inode->i_ino, in->numextents, inode->i_mode);
	switch (inode->i_mode & S_IFMT) {
		case S_IFDIR: 
			inode->i_op = &efs_dir_inode_operations; 
			inode->i_fop = &efs_dir_operations; 
			break;
		case S_IFREG:
			inode->i_fop = &generic_ro_fops;
			inode->i_data.a_ops = &efs_aops;
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode->i_data.a_ops = &efs_symlink_aops;
			break;
		case S_IFCHR:
		case S_IFBLK:
		case S_IFIFO:
			init_special_inode(inode, inode->i_mode, device);
			break;
		default:
			pr_warn("unsupported inode mode %o\n", inode->i_mode);
			goto read_inode_error;
			break;
	}

	unlock_new_inode(inode);
	return inode;
        
read_inode_error:
	pr_warn("failed to read inode %lu\n", inode->i_ino);
	iget_failed(inode);
	return ERR_PTR(-EIO);
}
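The layout comment inside efs_iget() describes how an inode number is turned into a block number and a byte offset within that block across EFS cylinder groups. A stand-alone version of that arithmetic, with made-up superblock geometry and an assumed 128-byte on-disk inode size:

#include <stdio.h>

#define EFS_BLOCKSIZE	512		/* EFS basic block size */

/* Hypothetical geometry values that would normally come from the EFS
 * superblock; the numbers below are only for demonstration. */
struct efs_geom {
	unsigned int fs_start;		/* first block of the filesystem */
	unsigned int first_block;	/* first inode block in a group */
	unsigned int group_size;	/* blocks per cylinder group */
	unsigned int inode_blocks;	/* inode blocks per cylinder group */
};

/* Same steps as the layout comment: index of the inode block as if inode
 * blocks were consecutive, then the real block given the group layout,
 * then the offset of the inode within that block. */
static void efs_locate_inode(const struct efs_geom *g, unsigned long ino,
			     unsigned int dinode_size,
			     unsigned int *block, unsigned int *offset)
{
	unsigned int per_block = EFS_BLOCKSIZE / dinode_size;
	unsigned int inode_index = ino / per_block;

	*block = g->fs_start + g->first_block +
		 g->group_size * (inode_index / g->inode_blocks) +
		 inode_index % g->inode_blocks;
	*offset = (ino % per_block) * dinode_size;
}

int main(void)
{
	struct efs_geom g = {
		.fs_start = 0, .first_block = 2,
		.group_size = 1024, .inode_blocks = 64,
	};
	unsigned int block, offset;

	efs_locate_inode(&g, 300, 128, &block, &offset);
	printf("ino 300 -> block %u, offset %u\n", block, offset);
	return 0;
}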