Example #1
/*
 * This function prepares a command to set/get an SNMP MIB.
 *
 * Preparation includes -
 *      - Setting command ID, action and proper size
 *      - Setting SNMP MIB OID number and value
 *        (as required)
 *      - Ensuring correct endian-ness
 *
 * The following SNMP MIB OIDs are supported -
 *      - FRAG_THRESH_I     : Fragmentation threshold
 *      - RTS_THRESH_I      : RTS threshold
 *      - SHORT_RETRY_LIM_I : Short retry limit
 *      - DOT11D_I          : 11d support
 */
static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
				       struct host_cmd_ds_command *cmd,
				       u16 cmd_action, u32 cmd_oid,
				       u16 *ul_temp)
{
	struct host_cmd_ds_802_11_snmp_mib *snmp_mib = &cmd->params.smib;

	dev_dbg(priv->adapter->dev, "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid);
	cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SNMP_MIB);
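	/* fixed size: the SNMP TLV struct minus its trailing 1-byte value[]
	 * placeholder, plus the generic command header (S_DS_GEN); the
	 * variable-length value is added to cmd->size below
	 */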
	cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_snmp_mib)
				- 1 + S_DS_GEN);

	snmp_mib->oid = cpu_to_le16((u16)cmd_oid);
	if (cmd_action == HostCmd_ACT_GEN_GET) {
		snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_GET);
		snmp_mib->buf_size = cpu_to_le16(MAX_SNMP_BUF_SIZE);
		le16_add_cpu(&cmd->size, MAX_SNMP_BUF_SIZE);
	} else if (cmd_action == HostCmd_ACT_GEN_SET) {
		snmp_mib->query_type = cpu_to_le16(HostCmd_ACT_GEN_SET);
		snmp_mib->buf_size = cpu_to_le16(sizeof(u16));
		*((__le16 *) (snmp_mib->value)) = cpu_to_le16(*ul_temp);
		le16_add_cpu(&cmd->size, sizeof(u16));
	}

	dev_dbg(priv->adapter->dev,
		"cmd: SNMP_CMD: Action=0x%x, OID=0x%x, OIDSize=0x%x,"
		" Value=0x%x\n",
		cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
		le16_to_cpu(*(__le16 *) snmp_mib->value));
	return 0;
}
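
Every example on this page centers on the same helper: le16_add_cpu() adds a CPU-order value to a little-endian 16-bit field in place, as the size accounting above does. For reference, a minimal sketch of its behavior (the kernel's definition in include/linux/byteorder/generic.h is essentially this):

/* Add a CPU-order value to a little-endian 16-bit field in place. */
static inline void le16_add_cpu(__le16 *var, u16 val)
{
	*var = cpu_to_le16(le16_to_cpu(*var) + val);
}

The read-modify-write is not atomic, which is why several examples below wrap it in a spinlock when the field is shared.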
Example #2
File: ie.c Project: 7799/linux
/* This function checks if the vendor-specified IE is present in the passed
 * buffer and copies it to the mwifiex_ie structure.
 * It takes a pointer to a struct mwifiex_ie pointer as an argument.
 * If the vendor-specified IE is present, memory is allocated for the
 * mwifiex_ie pointer and filled in with the IE. The caller is responsible
 * for freeing this memory.
 */
static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
				struct mwifiex_ie **ie_ptr, u16 mask,
				unsigned int oui, u8 oui_type)
{
	struct ieee_types_header *vs_ie;
	struct mwifiex_ie *ie = *ie_ptr;
	const u8 *vendor_ie;

	vendor_ie = cfg80211_find_vendor_ie(oui, oui_type, ies, ies_len);
	if (vendor_ie) {
		if (!*ie_ptr) {
			*ie_ptr = kzalloc(sizeof(struct mwifiex_ie),
					  GFP_KERNEL);
			if (!*ie_ptr)
				return -ENOMEM;
			ie = *ie_ptr;
		}

		vs_ie = (struct ieee_types_header *)vendor_ie;
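		/* copy the whole element: 2-byte header (id + len) plus payload */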
		memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
		       vs_ie, vs_ie->len + 2);
		le16_add_cpu(&ie->ie_length, vs_ie->len + 2);
		ie->mgmt_subtype_mask = cpu_to_le16(mask);
		ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK);
	}

	*ie_ptr = ie;
	return 0;
}
Example #3
static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
					  struct ocfs2_group_desc *bg,
					  struct ocfs2_chain_list *cl,
					  u64 p_blkno, unsigned int clusters)
{
	struct ocfs2_extent_list *el = &bg->bg_list;
	struct ocfs2_extent_rec *rec;

	BUG_ON(!ocfs2_supports_discontig_bg(osb));
	if (!el->l_next_free_rec)
		el->l_count = cpu_to_le16(ocfs2_extent_recs_per_gd(osb->sb));
	rec = &el->l_recs[le16_to_cpu(el->l_next_free_rec)];
	rec->e_blkno = cpu_to_le64(p_blkno);
	rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) /
				  le16_to_cpu(cl->cl_bpc));
	rec->e_leaf_clusters = cpu_to_le16(clusters);
	le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
	le16_add_cpu(&bg->bg_free_bits_count,
		     clusters * le16_to_cpu(cl->cl_bpc));
	le16_add_cpu(&el->l_next_free_rec, 1);
}
Example #4
static inline int ocfs2_block_group_clear_bits(handle_t *handle,
					       struct inode *alloc_inode,
					       struct ocfs2_group_desc *bg,
					       struct buffer_head *group_bh,
					       unsigned int bit_off,
					       unsigned int num_bits)
{
	int status;
	unsigned int tmp;
	int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
	struct ocfs2_group_desc *undo_bg = NULL;

	mlog_entry_void();

	if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
		OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);
		status = -EIO;
		goto bail;
	}

	mlog(0, "off = %u, num = %u\n", bit_off, num_bits);

	if (ocfs2_is_cluster_bitmap(alloc_inode))
		journal_type = OCFS2_JOURNAL_ACCESS_UNDO;

	status = ocfs2_journal_access(handle, alloc_inode, group_bh,
				      journal_type);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	if (ocfs2_is_cluster_bitmap(alloc_inode))
		undo_bg = (struct ocfs2_group_desc *) bh2jh(group_bh)->b_committed_data;

	tmp = num_bits;
	while(tmp--) {
		ocfs2_clear_bit((bit_off + tmp),
				(unsigned long *) bg->bg_bitmap);
		if (ocfs2_is_cluster_bitmap(alloc_inode))
			ocfs2_set_bit(bit_off + tmp,
				      (unsigned long *) undo_bg->bg_bitmap);
	}
	le16_add_cpu(&bg->bg_free_bits_count, num_bits);

	status = ocfs2_journal_dirty(handle, group_bh);
	if (status < 0)
		mlog_errno(status);
bail:
	return status;
}
Example #5
static inline int ocfs2_block_group_set_bits(handle_t *handle,
					     struct inode *alloc_inode,
					     struct ocfs2_group_desc *bg,
					     struct buffer_head *group_bh,
					     unsigned int bit_off,
					     unsigned int num_bits)
{
	int status;
	void *bitmap = bg->bg_bitmap;
	int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;

	mlog_entry_void();

	if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
		OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);
		status = -EIO;
		goto bail;
	}
	BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);

	mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
	     num_bits);

	if (ocfs2_is_cluster_bitmap(alloc_inode))
		journal_type = OCFS2_JOURNAL_ACCESS_UNDO;

	status = ocfs2_journal_access(handle,
				      alloc_inode,
				      group_bh,
				      journal_type);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	le16_add_cpu(&bg->bg_free_bits_count, -num_bits);

	while(num_bits--)
		ocfs2_set_bit(bit_off++, bitmap);

	status = ocfs2_journal_dirty(handle,
				     group_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	mlog_exit(status);
	return status;
}
Example #6
static int ext2_setup_super (struct super_block * sb,
                             struct ext2_super_block * es,
                             int read_only)
{
    int res = 0;
    struct ext2_sb_info *sbi = EXT2_SB(sb);

    if (le32_to_cpu(es->s_rev_level) > EXT2_MAX_SUPP_REV) {
        ext2_msg(sb, KERN_ERR,
                 "error: revision level too high, "
                 "forcing read-only mode");
        res = MS_RDONLY;
    }
    if (read_only)
        return res;
#ifndef MY_ABC_HERE
    if (!(sbi->s_mount_state & EXT2_VALID_FS))
        ext2_msg(sb, KERN_WARNING,
                 "warning: mounting unchecked fs, "
                 "running e2fsck is recommended");
    else if ((sbi->s_mount_state & EXT2_ERROR_FS))
        ext2_msg(sb, KERN_WARNING,
                 "warning: mounting fs with errors, "
                 "running e2fsck is recommended");
    else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
             le16_to_cpu(es->s_mnt_count) >=
             (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
        ext2_msg(sb, KERN_WARNING,
                 "warning: maximal mount count reached, "
                 "running e2fsck is recommended");
    else if (le32_to_cpu(es->s_checkinterval) &&
             (le32_to_cpu(es->s_lastcheck) +
              le32_to_cpu(es->s_checkinterval) <= get_seconds()))
        ext2_msg(sb, KERN_WARNING,
                 "warning: checktime reached, "
                 "running e2fsck is recommended");
#endif
    if (!le16_to_cpu(es->s_max_mnt_count))
        es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
    le16_add_cpu(&es->s_mnt_count, 1);
    if (test_opt (sb, DEBUG))
        ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
                 "bpg=%lu, ipg=%lu, mo=%04lx]",
                 EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize,
                 sbi->s_frag_size,
                 sbi->s_groups_count,
                 EXT2_BLOCKS_PER_GROUP(sb),
                 EXT2_INODES_PER_GROUP(sb),
                 sbi->s_mount_opt);
    return res;
}
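
The double cast on s_max_mnt_count above reads the on-disk little-endian field back as a signed 16-bit value, so a stored -1 (0xFFFF) means "mount-count checking disabled". A standalone illustration of that convention (hypothetical values, not part of ext2):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t raw = 0xFFFF;           /* s_max_mnt_count as stored on disk */
	int16_t max_mnt = (int16_t)raw;  /* reinterpret the same bits as signed */

	if (max_mnt >= 0)
		printf("enforce limit of %d mounts\n", max_mnt);
	else
		printf("%d: mount-count checking disabled\n", max_mnt);
	return 0;
}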
Example #7
static inline int ocfs2_block_group_set_bits(handle_t *handle,
					     struct inode *alloc_inode,
					     struct ocfs2_group_desc *bg,
					     struct buffer_head *group_bh,
					     unsigned int bit_off,
					     unsigned int num_bits)
{
	int status;
	void *bitmap = bg->bg_bitmap;
	int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;

	mlog_entry_void();

	/* All callers get the descriptor via
	 * ocfs2_read_group_descriptor().  Any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
	BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);

	mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
	     num_bits);

	if (ocfs2_is_cluster_bitmap(alloc_inode))
		journal_type = OCFS2_JOURNAL_ACCESS_UNDO;

	status = ocfs2_journal_access_gd(handle,
					 INODE_CACHE(alloc_inode),
					 group_bh,
					 journal_type);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	le16_add_cpu(&bg->bg_free_bits_count, -num_bits);

	while(num_bits--)
		ocfs2_set_bit(bit_off++, bitmap);

	status = ocfs2_journal_dirty(handle,
				     group_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	mlog_exit(status);
	return status;
}
static inline int ocfs2_block_group_set_bits(handle_t *handle,
					     struct inode *alloc_inode,
					     struct ocfs2_group_desc *bg,
					     struct buffer_head *group_bh,
					     unsigned int bit_off,
					     unsigned int num_bits)
{
	int status;
	void *bitmap = bg->bg_bitmap;
	int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;

	/* All callers get the descriptor via
	 * ocfs2_read_group_descriptor().  Any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
	BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);

	mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
	     num_bits);

	if (ocfs2_is_cluster_bitmap(alloc_inode))
		journal_type = OCFS2_JOURNAL_ACCESS_UNDO;

	status = ocfs2_journal_access_gd(handle,
					 INODE_CACHE(alloc_inode),
					 group_bh,
					 journal_type);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
	if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
		ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit"
			    " count %u but claims %u are freed. num_bits %d",
			    (unsigned long long)le64_to_cpu(bg->bg_blkno),
			    le16_to_cpu(bg->bg_bits),
			    le16_to_cpu(bg->bg_free_bits_count), num_bits);
		return -EROFS;
	}
	while (num_bits--)
		ocfs2_set_bit(bit_off++, bitmap);

	ocfs2_journal_dirty(handle, group_bh);

bail:
	return status;
}
/*
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
	Function	:me2fsAllocNewInode
	Input		:struct inode *dir
				 < vfs inode of directory >
				 umode_t mode
				 < file mode >
				 const struct qstr *qstr
				 < entry name for new inode >
	Output		:void
	Return		:struct inode*
				 < new allocated inode >

	Description	:allocate new inode
_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/
*/
struct inode*
me2fsAllocNewInode( struct inode *dir, umode_t mode, const struct qstr *qstr )
{
	struct super_block		*sb;
	struct buffer_head		*bitmap_bh;
	struct buffer_head		*bh_gdesc;

	struct inode			*inode;			/* new inode */
	ino_t					ino;
	struct ext2_group_desc	*gdesc;
	struct ext2_super_block	*esb;
	struct me2fs_inode_info	*mi;
	struct me2fs_sb_info	*msi;

	unsigned long			group;
	int						i;
	int						err;

	/* ------------------------------------------------------------------------ */
	/* allocate vfs new inode													*/
	/* ------------------------------------------------------------------------ */
	sb = dir->i_sb;

	if( !( inode = new_inode( sb ) ) )
	{
		return( ERR_PTR( -ENOMEM ) );
	}

	bitmap_bh	= NULL;
	ino			= 0;

	msi			= ME2FS_SB( sb );

	if( S_ISDIR( mode ) )
	{
		group = findDirectoryGroup( sb, dir );
	}
	else
	{
		/* -------------------------------------------------------------------- */
		/* for now, allocating an inode for a file is not supported			*/
		/* -------------------------------------------------------------------- */
		err = -ENOSPC;
		goto fail;
	}

	if( group == -1 )
	{
		err = -ENOSPC;
		goto fail;
	}

	for( i = 0 ; i < msi->s_groups_count ; i++ )
	{
		brelse( bitmap_bh );
		if( !( bitmap_bh = readInodeBitmap( sb, group ) ) )
		{
			err = -EIO;
			goto fail;
		}

		ino = 0;

		/* -------------------------------------------------------------------- */
		/* find free inode														*/
		/* -------------------------------------------------------------------- */
repeat_in_this_group:
		ino = find_next_zero_bit_le( ( unsigned long* )bitmap_bh->b_data,
									 msi->s_inodes_per_group,
									 ino );

		if( ME2FS_SB( sb )->s_inodes_per_group <= ino )
		{
			/* no free ino here; this group's bitmap is already full			*/
			group++;

			if( msi->s_groups_count <= group )
			{
				group = 0;
			}

			continue;
		}

		/* -------------------------------------------------------------------- */
		/* allocate inode atomically											*/
		/* -------------------------------------------------------------------- */
		if( ext2_set_bit_atomic( getSbBlockGroupLock( msi, group ),
								 ( int )ino,
								 bitmap_bh->b_data ) )
		{
			/* ---------------------------------------------------------------- */
			/* already set the bitmap											*/
			/* ---------------------------------------------------------------- */
			ino++;
			if( msi->s_inodes_per_group <= ino )
			{
				/* the group has no entry, try next								*/
				group++;
				if( msi->s_groups_count <= group )
				{
					group = 0;
				}
				continue;
			}

			/* try to find in the same group									*/
			goto repeat_in_this_group;
		}

		goto got;
	}

	/* ------------------------------------------------------------------------ */
	/* cannot find free inode													*/
	/* ------------------------------------------------------------------------ */
	err = -ENOSPC;
	goto fail;

	/* ------------------------------------------------------------------------ */
	/* found free inode															*/
	/* ------------------------------------------------------------------------ */
got:
	mi		= ME2FS_I( inode );
	esb		= msi->s_esb;

	mark_buffer_dirty( bitmap_bh );
	if( sb->s_flags & MS_SYNCHRONOUS )
	{
		sync_dirty_buffer( bitmap_bh );
	}
	brelse( bitmap_bh );

	/* ------------------------------------------------------------------------ */
	/* get absolute inode number												*/
	/* ------------------------------------------------------------------------ */
	ino	+= ( group * ME2FS_SB( sb )->s_inodes_per_group ) + 1;

	if( ( ino < msi->s_first_ino ) ||
		( le32_to_cpu( esb->s_inodes_count ) < ino ) )
	{
		ME2FS_ERROR( "<ME2FS>%s:insane inode number. ino=%lu,group=%lu\n",
					  __func__, ( unsigned long )ino, group );
		err = -EIO;
		goto fail;
	}

	/* ------------------------------------------------------------------------ */
	/* update group descriptor													*/
	/* ------------------------------------------------------------------------ */
	gdesc		= me2fsGetGroupDescriptor( sb, group );
	bh_gdesc	= me2fsGetGdescBufferCache( sb, group );

	percpu_counter_add( &msi->s_freeinodes_counter, -1 );

	if( S_ISDIR( mode ) )
	{
		percpu_counter_inc( &msi->s_dirs_counter );
	}

	spin_lock( getSbBlockGroupLock( msi, group ) );
	{
		le16_add_cpu( &gdesc->bg_free_inodes_count, -1 );
		if( S_ISDIR( mode ) )
		{
			le16_add_cpu( &gdesc->bg_used_dirs_count, 1 );
		}
	}
	spin_unlock( getSbBlockGroupLock( msi, group ) );

	mark_buffer_dirty( bh_gdesc );

	/* ------------------------------------------------------------------------ */
	/* initialize vfs inode														*/
	/* ------------------------------------------------------------------------ */
	inode_init_owner( inode, dir, mode );

	inode->i_ino	= ino;
	inode->i_blocks	= 0;
	inode->i_mtime	= CURRENT_TIME_SEC;
	inode->i_atime	= inode->i_mtime;
	inode->i_ctime	= inode->i_mtime;

	/* ------------------------------------------------------------------------ */
	/* initialize me2fs inode information										*/
	/* ------------------------------------------------------------------------ */
	memset( mi->i_data, 0, sizeof( mi->i_data ) );

	mi->i_flags = ME2FS_I( dir )->i_flags & EXT2_FL_INHERITED;

	if( S_ISDIR( mode ) )
	{
		/* do nothing															*/
	}
	else if( S_ISREG( mode ) )
	{
		mi->i_flags &= EXT2_REG_FLMASK;
	}
	else
	{
		mi->i_flags &= EXT2_OTHER_FLMASK;
	}

	mi->i_faddr				= 0;
	mi->i_frag_no			= 0;
	mi->i_frag_size			= 0;
	mi->i_file_acl			= 0;
	mi->i_dir_acl			= 0;
	mi->i_dtime				= 0;
	//mi->i_block_alloc_info	= NULL;
	mi->i_state				= EXT2_STATE_NEW;

	me2fsSetVfsInodeFlags( inode );
	
	/* insert vfs inode to hash table											*/
	if( insert_inode_locked( inode ) < 0 )
	{
		ME2FS_ERROR( "<ME2FS>%s:inode number already in use[%lu]\n",
					 __func__, ( unsigned long )ino );
		err = -EIO;
		goto fail;
	}

	/* initialize quota															*/
#if 0	// quota
	dquot_initialize( inode );
	if( dquot_alloc_inode( inode ) )
	{
		goto fail_drop;
	}
#endif
#if 0	// acl
	/* initialize acl															*/
	if( me2fsInitAcl( inode, dir ) )
	{
		goto fail_free_drop;
	}
#endif
#if 0	// security
	/* initialize security														*/
	if( me2fsInitSecurity( inode, dir, qstr ) )
	{
		goto fail_free_drop;
	}
#endif

	mark_inode_dirty( inode );

	DBGPRINT( "<ME2FS>allocating new inode %lu\n",
			  ( unsigned long )inode->i_ino );

#if 0	// preread
	me2fsPrereadInode( inode );
#endif

	return( inode );

	/* ------------------------------------------------------------------------ */
	/* allocation of new inode is failed										*/
	/* ------------------------------------------------------------------------ */
fail:
	make_bad_inode( inode );
	iput( inode );
	return( ERR_PTR( err ) );
}
/*
==================================================================================
	Function	:me2fsFillSuperBlock
	Input		:struct super_block *sb
				 < vfs super block object >
				 void *data
				 < user data >
				 int silent
				 < silent flag >
	Output		:void
	Return		:void

	Description	:fill my ext2 super block object
==================================================================================
*/
static int me2fsFillSuperBlock( struct super_block *sb,
								void *data,
								int silent )
{
	struct buffer_head		*bh;
	struct ext2_super_block	*esb;
	struct me2fs_sb_info	*msi;
	struct inode			*root;
	int						block_size;
	int						ret = -EINVAL;
	int						i;
	int						err;
	unsigned long			sb_block = 1;

	/* ------------------------------------------------------------------------ */
	/* allocate memory to me2fs_sb_info											*/
	/* ------------------------------------------------------------------------ */
	msi = kzalloc( sizeof( struct me2fs_sb_info ), GFP_KERNEL );

	if( !msi )
	{
		ME2FS_ERROR( "<ME2FS>error: unable to alloc me2fs_sb_info\n" );
		ret = -ENOMEM;
		return( ret );
	}

	/* set me2fs information to vfs super block									*/
	sb->s_fs_info	= ( void* )msi;

	/* ------------------------------------------------------------------------ */
	/* allocate memory to spin locks for block group							*/
	/* ------------------------------------------------------------------------ */
	msi->s_blockgroup_lock = kzalloc( sizeof( struct blockgroup_lock ),
									  GFP_KERNEL );
	if( !msi->s_blockgroup_lock )
	{
		ME2FS_ERROR( "<ME2FS>error: unabel to alloc s_blockgroup_lock\n" );
		kfree( msi );
		return( -ENOMEM );
	}

	/* ------------------------------------------------------------------------ */
	/* set device's block size and size bits to super block						*/
	/* ------------------------------------------------------------------------ */
	block_size = sb_min_blocksize( sb, BLOCK_SIZE );

	DBGPRINT( "<ME2FS>Fill Super! block_size = %d\n", block_size );
	DBGPRINT( "<ME2FS>s_blocksize_bits = %d\n", sb->s_blocksize_bits );
	DBGPRINT( "<ME2FS>default block size is : %d\n", BLOCK_SIZE );

	if( !block_size )
	{
		ME2FS_ERROR( "<ME2FS>error: unable to set blocksize\n" );
		goto error_read_sb;
	}

	/* ------------------------------------------------------------------------ */
	/* read super block															*/
	/* ------------------------------------------------------------------------ */
	if( !( bh = sb_bread( sb, sb_block ) ) )
	{
		ME2FS_ERROR( "<ME2FS>failed to bread super block\n" );
		goto error_read_sb;
	}

	esb = ( struct ext2_super_block* )( bh->b_data );

	/* ------------------------------------------------------------------------ */
	/* check magic number														*/
	/* ------------------------------------------------------------------------ */
	sb->s_magic = le16_to_cpu( esb->s_magic );

	if( sb->s_magic != ME2FS_SUPER_MAGIC )
	{
		ME2FS_ERROR( "<ME2FS>error : magic of super block is %lu\n", sb->s_magic );
		goto error_mount;
	}

	/* ------------------------------------------------------------------------ */
	/* check revision															*/
	/* ------------------------------------------------------------------------ */
	if( ME2FS_OLD_REV == le32_to_cpu( esb->s_rev_level ) )
	{
		ME2FS_ERROR( "<ME2FS>error : cannot mount old revision\n" );
		goto error_mount;
	}

	dbgPrintExt2SB( esb );

	/* ------------------------------------------------------------------------ */
	/* set up me2fs super block information										*/
	/* ------------------------------------------------------------------------ */
	/* buffer cache information													*/
	msi->s_esb				= esb;
	msi->s_sbh				= bh;
	/* super block(disk information cache)										*/
	msi->s_sb_block			= sb_block;
	msi->s_mount_opt		= le32_to_cpu( esb->s_default_mount_opts );
	msi->s_mount_state		= le16_to_cpu( esb->s_state );

	if( msi->s_mount_state != EXT2_VALID_FS )
	{
		ME2FS_ERROR( "<ME2FS>error : cannot mount invalid fs\n" );
		goto error_mount;
	}

	if( le16_to_cpu( esb->s_errors ) == EXT2_ERRORS_CONTINUE )
	{
		DBGPRINT( "<ME2FS>s_errors : CONTINUE\n" );
	}
	else if( le16_to_cpu( esb->s_errors ) == EXT2_ERRORS_PANIC )
	{
		DBGPRINT( "<ME2FS>s_errors : PANIC\n" );
	}
	else
	{
		DBGPRINT( "<ME2FS>s_errors : READ ONLY\n" );
	}

	if( le32_to_cpu( esb->s_rev_level ) != EXT2_DYNAMIC_REV )
	{
		ME2FS_ERROR( "<ME2FS>error : cannot mount unsupported revision\n" );
		goto error_mount;
	}

	/* inode(disk information cache)											*/
	msi->s_inode_size		= le16_to_cpu( esb->s_inode_size );
	if( ( msi->s_inode_size < 128  ) ||
		!is_power_of_2( msi->s_inode_size ) ||
		( block_size < msi->s_inode_size ) )
	{
		ME2FS_ERROR( "<ME2FS>error : cannot mount unsupported inode size %u\n",
				  msi->s_inode_size );
		goto error_mount;
	}
	msi->s_first_ino		= le32_to_cpu( esb->s_first_ino );
	msi->s_inodes_per_group	= le32_to_cpu( esb->s_inodes_per_group );
	msi->s_inodes_per_block	= sb->s_blocksize / msi->s_inode_size;
	if( msi->s_inodes_per_block == 0 )
	{
		ME2FS_ERROR( "<ME2FS>error : bad inodes per block\n" );
		goto error_mount;
	}
	msi->s_itb_per_group	= msi->s_inodes_per_group / msi->s_inodes_per_block;
	/* group(disk information cache)											*/
	msi->s_blocks_per_group	= le32_to_cpu( esb->s_blocks_per_group );
	if( msi->s_blocks_per_group == 0 )
	{
		ME2FS_ERROR( "<ME2FS>eroor : bad blocks per group\n" );
		goto error_mount;
	}
	msi->s_groups_count		= ( ( le32_to_cpu( esb->s_blocks_count )
								- le32_to_cpu( esb->s_first_data_block ) - 1 )
							  / msi->s_blocks_per_group ) + 1;
	msi->s_desc_per_block	= sb->s_blocksize / sizeof( struct ext2_group_desc );
	msi->s_gdb_count		= ( msi->s_groups_count + msi->s_desc_per_block - 1 )
							  / msi->s_desc_per_block;
	/* fragment(disk information cache)											*/
	msi->s_frag_size		= 1024 << le32_to_cpu( esb->s_log_frag_size );
	if( msi->s_frag_size == 0 )
	{
		ME2FS_ERROR( "<ME2FS>eroor : bad fragment size\n" );
		goto error_mount;
	}
	msi->s_frags_per_block	= sb->s_blocksize / msi->s_frag_size;
	msi->s_frags_per_group	= le32_to_cpu( esb->s_frags_per_group );

	/* defaults(disk information cache)											*/
	msi->s_resuid = make_kuid( &init_user_ns, le16_to_cpu( esb->s_def_resuid ) );
	msi->s_resgid = make_kgid( &init_user_ns, le16_to_cpu( esb->s_def_resgid ) );
	
	dbgPrintMe2fsInfo( msi );
	
	/* ------------------------------------------------------------------------ */
	/* read block group descriptor table										*/
	/* ------------------------------------------------------------------------ */
	msi->s_group_desc = kmalloc( msi->s_gdb_count * sizeof( struct buffer_head* ),
								 GFP_KERNEL );
	
	if( !msi->s_group_desc )
	{
		ME2FS_ERROR( "<ME2FS>error : alloc memory for group desc is failed\n" );
		goto error_mount;
	}

	for( i = 0 ; i < msi->s_gdb_count ; i++ )
	{
		unsigned long	block;

		block = getDescriptorLocation( sb, sb_block, i );
		if( !( msi->s_group_desc[ i ] = sb_bread( sb, block ) ) )
		//if( !( msi->s_group_desc[ i ] = sb_bread( sb, sb_block + i + 1 ) ) )
		{
			ME2FS_ERROR( "<ME2FS>error : cannot read " );
			ME2FS_ERROR( "block group descriptor[ group=%d ]\n", i );
			goto error_mount_phase2;
		}
	}

	/* ------------------------------------------------------------------------ */
	/* sanity check for group descriptors										*/
	/* ------------------------------------------------------------------------ */
	for( i = 0 ; i < msi->s_groups_count ; i++ )
	{
		struct ext2_group_desc	*gdesc;
		unsigned long			first_block;
		unsigned long			last_block;
		unsigned long			ar_block;

		DBGPRINT( "<ME2FS>Block Count %d\n", i );

		if( !( gdesc = me2fsGetGroupDescriptor( sb, i ) ) )
		{
			/* corresponding group descriptor does not exist					*/
			goto error_mount_phase2;
		}

		first_block	= ext2GetFirstBlockNum( sb, i );

		if( i == ( msi->s_groups_count - 1 ) )
		{
			last_block	= le32_to_cpu( esb->s_blocks_count ) - 1;
		}
		else
		{
			last_block	= first_block + ( msi->s_blocks_per_group - 1 );
		}

		DBGPRINT( "<ME2FS>first = %lu, last = %lu\n", first_block, last_block );

		ar_block = le32_to_cpu( gdesc->bg_block_bitmap );

		if( ( ar_block < first_block ) || ( last_block < ar_block ) )
		{
			ME2FS_ERROR( "<ME2FS>error : block num of block bitmap is " );
			ME2FS_ERROR( "insanity [ group=%d, first=%lu, block=%lu, last=%lu ]\n",
					  i, first_block, ar_block, last_block );
			goto error_mount_phase2;
		}

		ar_block = le32_to_cpu( gdesc->bg_inode_bitmap );

		if( ( ar_block < first_block ) || ( last_block < ar_block ) )
		{
			ME2FS_ERROR( "<ME2FS>error : block num of inode bitmap is " );
			ME2FS_ERROR( "insanity [ group=%d, first=%lu, block=%lu, last=%lu ]\n",
					  i, first_block, ar_block, last_block );
			goto error_mount_phase2;
		}

		ar_block = le32_to_cpu( gdesc->bg_inode_table );

		if( ( ar_block < first_block ) || ( last_block < ar_block ) )
		{
			ME2FS_ERROR( "<ME2FS>error : block num of inode table is " );
			ME2FS_ERROR( "insanity [ group=%d, first=%lu, block=%lu, last=%lu ]\n",
					  i, first_block, ar_block, last_block );
			goto error_mount_phase2;
		}

		dbgPrintExt2Bgd( gdesc, i );
	}

	/* ------------------------------------------------------------------------ */
	/* initialize exclusive locks												*/
	/* ------------------------------------------------------------------------ */
	spin_lock_init( &msi->s_lock );

	bgl_lock_init( msi->s_blockgroup_lock );

	err = percpu_counter_init( &msi->s_freeblocks_counter,
							   me2fsCountFreeBlocks( sb ) );
	
	if( err )
	{
		ME2FS_ERROR( "<ME2FS>cannot allocate memory for percpu counter" );
		ME2FS_ERROR( "[s_freeblocks_counter]\n" );
		goto error_mount_phase3;
	}

	err = percpu_counter_init( &msi->s_freeinodes_counter,
							   me2fsCountFreeInodes( sb ) );
	
	if( err )
	{
		ME2FS_ERROR( "<ME2FS>cannot allocate memory for percpu counter" );
		ME2FS_ERROR( "[s_freeinodes_counter]\n" );
		goto error_mount_phase3;
	}

	err = percpu_counter_init( &msi->s_dirs_counter,
							   me2fsCountDirectories( sb ) );
	
	if( err )
	{
		ME2FS_ERROR( "<ME2FS>cannot allocate memory for percpu counter" );
		ME2FS_ERROR( "[s_dirs_counter]\n" );
		goto error_mount_phase3;
	}

	/* ------------------------------------------------------------------------ */
	/* set up vfs super block													*/
	/* ------------------------------------------------------------------------ */
	sb->s_op		= &me2fs_super_ops;
	//sb->s_export_op	= &me2fs_export_ops;
	//sb->s_xattr		= me2fs_xattr_handler;

	sb->s_maxbytes	= me2fsMaxFileSize( sb );
	sb->s_max_links	= ME2FS_LINK_MAX;

	DBGPRINT( "<ME2FS>max file size = %lld\n", sb->s_maxbytes );


	root = me2fsGetVfsInode( sb, ME2FS_EXT2_ROOT_INO );
	//root = iget_locked( sb, ME2FS_EXT2_ROOT_INO );

	if( IS_ERR( root ) )
	{
		ME2FS_ERROR( "<ME2FS>error : failed to get root inode\n" );
		ret = PTR_ERR( root );
		goto error_mount_phase3;
	}


	if( !S_ISDIR( root->i_mode ) )
	{
		ME2FS_ERROR( "<ME2FS>root is not directory!!!!\n" );
	}

	sb->s_root = d_make_root( root );
	//sb->s_root = d_alloc_root( root_ino );

	if( !sb->s_root )
	{
		ME2FS_ERROR( "<ME2FS>error : failed to make root\n" );
		ret = -ENOMEM;
		goto error_mount_phase3;
	}

	le16_add_cpu( &esb->s_mnt_count, 1 );

	me2fsWriteSuper( sb );

	DBGPRINT( "<ME2FS> me2fs is mounted !\n" );

	return( 0 );

	/* ------------------------------------------------------------------------ */
	/* destroy percpu counter													*/
	/* ------------------------------------------------------------------------ */
error_mount_phase3:
	percpu_counter_destroy( &msi->s_freeblocks_counter );
	percpu_counter_destroy( &msi->s_freeinodes_counter );
	percpu_counter_destroy( &msi->s_dirs_counter );
	/* ------------------------------------------------------------------------ */
	/* release buffer for group descriptors										*/
	/* ------------------------------------------------------------------------ */
error_mount_phase2:
	for( i = 0 ; i < msi->s_gdb_count ; i++ )
	{
		brelse( msi->s_group_desc[ i ] );
	}
	kfree( msi->s_group_desc );
	/* ------------------------------------------------------------------------ */
	/* release buffer cache for read super block								*/
	/* ------------------------------------------------------------------------ */
error_mount:
	brelse( bh );

	/* ------------------------------------------------------------------------ */
	/* release me2fs super block information									*/
	/* ------------------------------------------------------------------------ */
error_read_sb:
	sb->s_fs_info = NULL;
	kfree( msi->s_blockgroup_lock );
	kfree( msi );

	return( ret );
}
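
The s_groups_count computation in the function above is a ceiling division: a partial trailing group still needs its own descriptor. A quick standalone check of the formula with hypothetical superblock values:

#include <stdio.h>

int main(void)
{
	/* hypothetical: 8193 blocks total, first data block 1, 8192 per group */
	unsigned long blocks_count = 8193, first_data_block = 1,
		      blocks_per_group = 8192;

	unsigned long groups =
		( ( blocks_count - first_data_block - 1 ) / blocks_per_group ) + 1;

	printf( "groups_count = %lu\n", groups );	/* exactly one full group */
	return( 0 );
}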
Example #11
File: ie.c Project: 7799/linux
/* This function prepares the IE data buffer for a command to be sent to the FW */
static int
mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
			     struct mwifiex_ie_list *ie_list)
{
	u16 travel_len, index, mask;
	s16 input_len, tlv_len;
	struct mwifiex_ie *ie;
	u8 *tmp;

	input_len = le16_to_cpu(ie_list->len);
	travel_len = sizeof(struct mwifiex_ie_types_header);

	ie_list->len = 0;

	while (input_len >= sizeof(struct mwifiex_ie_types_header)) {
		ie = (struct mwifiex_ie *)(((u8 *)ie_list) + travel_len);
		tlv_len = le16_to_cpu(ie->ie_length);
		travel_len += tlv_len + MWIFIEX_IE_HDR_SIZE;

		if (input_len < tlv_len + MWIFIEX_IE_HDR_SIZE)
			return -1;
		index = le16_to_cpu(ie->ie_index);
		mask = le16_to_cpu(ie->mgmt_subtype_mask);

		if (index == MWIFIEX_AUTO_IDX_MASK) {
			/* automatic addition */
			if (mwifiex_ie_get_autoidx(priv, mask, ie, &index))
				return -1;
			if (index == MWIFIEX_AUTO_IDX_MASK)
				return -1;

			tmp = (u8 *)&priv->mgmt_ie[index].ie_buffer;
			memcpy(tmp, &ie->ie_buffer, le16_to_cpu(ie->ie_length));
			priv->mgmt_ie[index].ie_length = ie->ie_length;
			priv->mgmt_ie[index].ie_index = cpu_to_le16(index);
			priv->mgmt_ie[index].mgmt_subtype_mask =
							cpu_to_le16(mask);

			ie->ie_index = cpu_to_le16(index);
		} else {
			if (mask != MWIFIEX_DELETE_MASK)
				return -1;
			/*
			 * Check if this index is being used on any
			 * other interface.
			 */
			if (mwifiex_ie_index_used_by_other_intf(priv, index))
				return -1;

			ie->ie_length = 0;
			memcpy(&priv->mgmt_ie[index], ie,
			       sizeof(struct mwifiex_ie));
		}

		le16_add_cpu(&ie_list->len,
			     le16_to_cpu(priv->mgmt_ie[index].ie_length) +
			     MWIFIEX_IE_HDR_SIZE);
		input_len -= tlv_len + MWIFIEX_IE_HDR_SIZE;
	}

	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
		return mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
					HostCmd_ACT_GEN_SET,
					UAP_CUSTOM_IE_I, ie_list, false);

	return 0;
}
/* Add a new group descriptor to global_bitmap. */
int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_chain_list *cl;
	struct ocfs2_chain_rec *cr;
	u16 cl_bpc;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	mutex_lock(&main_bm_inode->i_mutex);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
		ocfs2_group_bitmap_size(osb->sb, 0,
					osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small."
		     " Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = ocfs2_read_blocks_sync(osb, input->group, 1, &group_bh);
	if (ret < 0) {
		mlog(ML_ERROR, "Can't read the group descriptor # %llu "
		     "from the device.", (unsigned long long)input->group);
		goto out_unlock;
	}

	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), group_bh);

	ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock;
	}

	trace_ocfs2_group_add((unsigned long long)input->group,
			       input->chain, input->clusters, input->frees);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_unlock;
	}

	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	cl = &fe->id2.i_chain;
	cr = &cl->cl_recs[input->chain];

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(main_bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;
	group->bg_next_group = cr->c_blkno;
	ocfs2_journal_dirty(handle, group_bh);

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
				      main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	if (input->chain == le16_to_cpu(cl->cl_next_free_rec)) {
		le16_add_cpu(&cl->cl_next_free_rec, 1);
		memset(cr, 0, sizeof(struct ocfs2_chain_rec));
	}

	cr->c_blkno = cpu_to_le64(input->group);
	le32_add_cpu(&cr->c_total, input->clusters * cl_bpc);
	le32_add_cpu(&cr->c_free, input->frees * cl_bpc);

	le32_add_cpu(&fe->id1.bitmap1.i_total, input->clusters * cl_bpc);
	le32_add_cpu(&fe->id1.bitmap1.i_used,
		     (input->clusters - input->frees) * cl_bpc);
	le32_add_cpu(&fe->i_clusters, input->clusters);

	ocfs2_journal_dirty(handle, main_bm_bh);

	spin_lock(&OCFS2_I(main_bm_inode)->ip_lock);
	OCFS2_I(main_bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, input->clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(main_bm_inode)->ip_lock);
	i_size_write(main_bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_update_super_and_backups(main_bm_inode, input->clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	brelse(group_bh);
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	mutex_unlock(&main_bm_inode->i_mutex);
	iput(main_bm_inode);

out:
	return ret;
}
Example #13
/*
 * We expect the block group allocator to already be locked.
 */
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
				   struct inode *alloc_inode,
				   struct buffer_head *bh)
{
	int status, credits;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
	struct ocfs2_chain_list *cl;
	struct ocfs2_alloc_context *ac = NULL;
	handle_t *handle = NULL;
	u32 bit_off, num_bits;
	u16 alloc_rec;
	u64 bg_blkno;
	struct buffer_head *bg_bh = NULL;
	struct ocfs2_group_desc *bg;

	BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));

	mlog_entry_void();

	cl = &fe->id2.i_chain;
	status = ocfs2_reserve_clusters(osb,
					le16_to_cpu(cl->cl_cpg),
					&ac);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	credits = ocfs2_calc_group_alloc_credits(osb->sb,
						 le16_to_cpu(cl->cl_cpg));
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_claim_clusters(osb,
				      handle,
				      ac,
				      le16_to_cpu(cl->cl_cpg),
				      &bit_off,
				      &num_bits);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	alloc_rec = ocfs2_find_smallest_chain(cl);

	/* setup the group */
	bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
	mlog(0, "new descriptor, record %u, at block %llu\n",
	     alloc_rec, (unsigned long long)bg_blkno);

	bg_bh = sb_getblk(osb->sb, bg_blkno);
	if (!bg_bh) {
		status = -EIO;
		mlog_errno(status);
		goto bail;
	}
	ocfs2_set_new_buffer_uptodate(alloc_inode, bg_bh);

	status = ocfs2_block_group_fill(handle,
					alloc_inode,
					bg_bh,
					bg_blkno,
					alloc_rec,
					cl);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	bg = (struct ocfs2_group_desc *) bg_bh->b_data;

	status = ocfs2_journal_access(handle, alloc_inode,
				      bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
		     le16_to_cpu(bg->bg_free_bits_count));
	le32_add_cpu(&cl->cl_recs[alloc_rec].c_total, le16_to_cpu(bg->bg_bits));
	cl->cl_recs[alloc_rec].c_blkno  = cpu_to_le64(bg_blkno);
	if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
		le16_add_cpu(&cl->cl_next_free_rec, 1);

	le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
					le16_to_cpu(bg->bg_free_bits_count));
	le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
	le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));

	status = ocfs2_journal_dirty(handle, bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
	OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
					     le32_to_cpu(fe->i_clusters)));
	spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
	i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
	alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode);

	status = 0;
bail:
	if (handle)
		ocfs2_commit_trans(osb, handle);

	if (ac)
		ocfs2_free_alloc_context(ac);

	if (bg_bh)
		brelse(bg_bh);

	mlog_exit(status);
	return status;
}
Example #14
/*
 * We expect the block group allocator to already be locked.
 */
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
				   struct inode *alloc_inode,
				   struct buffer_head *bh,
				   u64 max_block,
				   u64 *last_alloc_group,
				   int flags)
{
	int status, credits;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
	struct ocfs2_chain_list *cl;
	struct ocfs2_alloc_context *ac = NULL;
	handle_t *handle = NULL;
	u16 alloc_rec;
	struct buffer_head *bg_bh = NULL;
	struct ocfs2_group_desc *bg;

	BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));

	cl = &fe->id2.i_chain;
	status = ocfs2_reserve_clusters_with_limit(osb,
						   le16_to_cpu(cl->cl_cpg),
						   max_block, flags, &ac);
	if (status < 0) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}

	credits = ocfs2_calc_group_alloc_credits(osb->sb,
						 le16_to_cpu(cl->cl_cpg));
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto bail;
	}

	if (last_alloc_group && *last_alloc_group != 0) {
		trace_ocfs2_block_group_alloc(
				(unsigned long long)*last_alloc_group);
		ac->ac_last_group = *last_alloc_group;
	}

	bg_bh = ocfs2_block_group_alloc_contig(osb, handle, alloc_inode,
					       ac, cl);
	if (IS_ERR(bg_bh) && (PTR_ERR(bg_bh) == -ENOSPC))
		bg_bh = ocfs2_block_group_alloc_discontig(handle,
							  alloc_inode,
							  ac, cl);
	if (IS_ERR(bg_bh)) {
		status = PTR_ERR(bg_bh);
		bg_bh = NULL;
		if (status != -ENOSPC)
			mlog_errno(status);
		goto bail;
	}
	bg = (struct ocfs2_group_desc *) bg_bh->b_data;

	status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode),
					 bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	alloc_rec = le16_to_cpu(bg->bg_chain);
	le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
		     le16_to_cpu(bg->bg_free_bits_count));
	le32_add_cpu(&cl->cl_recs[alloc_rec].c_total,
		     le16_to_cpu(bg->bg_bits));
	cl->cl_recs[alloc_rec].c_blkno = bg->bg_blkno;
	if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
		le16_add_cpu(&cl->cl_next_free_rec, 1);

	le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
					le16_to_cpu(bg->bg_free_bits_count));
	le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
	le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));

	ocfs2_journal_dirty(handle, bh);

	spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
	OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
					     le32_to_cpu(fe->i_clusters)));
	spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
	i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
	alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode);
	ocfs2_update_inode_fsync_trans(handle, alloc_inode, 0);

	status = 0;

	/* save the new last alloc group so that the caller can cache it. */
	if (last_alloc_group)
		*last_alloc_group = ac->ac_last_group;

bail:
	if (handle)
		ocfs2_commit_trans(osb, handle);

	if (ac)
		ocfs2_free_alloc_context(ac);

	brelse(bg_bh);

	if (status)
		mlog_errno(status);
	return status;
}
Example #15
static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd, int start_len,
				     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
{
#ifdef CONFIG_INET
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct page **page_ptr;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most; they will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room);
	if (!hdr_page)
		return -ENOMEM;

	get_page(hdr_page->page);
	start_hdr = hdr_page->pos;
	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	*page_ptr = hdr_page->page;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int tb_len;
		dma_addr_t tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			dev_kfree_skb(csum_skb);
			goto out_err;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				goto out_err;
			}
			iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
						tb_len);

			data_left -= tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;

out_err:
#endif
	return -EINVAL;
}
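
The amsdu_pad bookkeeping above keeps each A-MSDU subframe (ethhdr + SNAP/IP/TCP headers + payload) aligned to a 4-byte boundary before the next subframe starts. A standalone illustration of the same padding formula (hypothetical lengths):

#include <stdio.h>

/* bytes of padding needed so a subframe of this length ends 4-byte aligned */
static unsigned int amsdu_pad_for(unsigned int subframe_len)
{
	return (4 - subframe_len) & 0x3;
}

int main(void)
{
	unsigned int len;

	for (len = 60; len < 64; len++)
		printf("subframe of %u bytes -> %u pad byte(s)\n",
		       len, amsdu_pad_for(len));
	return 0;
}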
Example #16
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext3_new_inode(handle_t *handle, struct inode * dir,
			     const struct qstr *qstr, int mode)
{
	struct super_block *sb;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	int group;
	unsigned long ino = 0;
	struct inode * inode;
	struct ext3_group_desc * gdp = NULL;
	struct ext3_super_block * es;
	struct ext3_inode_info *ei;
	struct ext3_sb_info *sbi;
	int err = 0;
	struct inode *ret;
	int i;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	trace_ext3_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT3_I(inode);

	sbi = EXT3_SB(sb);
	es = sbi->s_es;
	if (S_ISDIR(mode)) {
		if (test_opt (sb, OLDALLOC))
			group = find_group_dir(sb, dir);
		else
			group = find_group_orlov(sb, dir);
	} else
		group = find_group_other(sb, dir);

	err = -ENOSPC;
	if (group == -1)
		goto out;

	for (i = 0; i < sbi->s_groups_count; i++) {
		err = -EIO;

		gdp = ext3_get_group_desc(sb, group, &bh2);
		if (!gdp)
			goto fail;

		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, group);
		if (!bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext3_find_next_zero_bit((unsigned long *)
				bitmap_bh->b_data, EXT3_INODES_PER_GROUP(sb), ino);
		if (ino < EXT3_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(bitmap_bh, "get_write_access");
			err = ext3_journal_get_write_access(handle, bitmap_bh);
			if (err)
				goto fail;

			if (!ext3_set_bit_atomic(sb_bgl_lock(sbi, group),
						ino, bitmap_bh->b_data)) {
				/* we won it */
				BUFFER_TRACE(bitmap_bh,
					"call ext3_journal_dirty_metadata");
				err = ext3_journal_dirty_metadata(handle,
								bitmap_bh);
				if (err)
					goto fail;
				goto got;
			}
			/* we lost it */
			journal_release_buffer(handle, bitmap_bh);

			if (++ino < EXT3_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in a concurrent environment.  It is very
		 * rare.  We cannot repeat the find_group_xxx() call because
		 * that will simply return the same blockgroup, because the
		 * group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	ino += group * EXT3_INODES_PER_GROUP(sb) + 1;
	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext3_error (sb, "ext3_new_inode",
			    "reserved inode or inode > inodes count - "
			    "block_group = %d, inode=%lu", group, ino);
		err = -EIO;
		goto fail;
	}

	BUFFER_TRACE(bh2, "get_write_access");
	err = ext3_journal_get_write_access(handle, bh2);
	if (err) goto fail;
	spin_lock(sb_bgl_lock(sbi, group));
	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
	if (S_ISDIR(mode)) {
		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
	}
	spin_unlock(sb_bgl_lock(sbi, group));
	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, bh2);
	if (err) goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);


	if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	inode->i_ino = ino;
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	ei->i_flags =
		ext3_mask_flags(mode, EXT3_I(dir)->i_flags & EXT3_FL_INHERITED);
#ifdef EXT3_FRAGMENTS
	ei->i_faddr = 0;
	ei->i_frag_no = 0;
	ei->i_frag_size = 0;
#endif
	ei->i_file_acl = 0;
	ei->i_dir_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_alloc_info = NULL;
	ei->i_block_group = group;

	ext3_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		handle->h_sync = 1;
	if (insert_inode_locked(inode) < 0) {
		err = -EINVAL;
		goto fail_drop;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state_flags = 0;
	ext3_set_inode_state(inode, EXT3_STATE_NEW);

	/* See comment in ext3_iget for explanation */
	if (ino >= EXT3_FIRST_INO(sb) + 1 &&
	    EXT3_INODE_SIZE(sb) > EXT3_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize =
			sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE;
	} else {
		ei->i_extra_isize = 0;
	}

	ret = inode;
	dquot_initialize(inode);
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;

	err = ext3_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext3_init_security(handle, inode, dir, qstr);
	if (err)
		goto fail_free_drop;

	err = ext3_mark_inode_dirty(handle, inode);
	if (err) {
		ext3_std_error(sb, err);
		goto fail_free_drop;
	}

	ext3_debug("allocating inode %lu\n", inode->i_ino);
	trace_ext3_allocate_inode(inode, dir, mode);
	goto really_out;
fail:
	ext3_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(bitmap_bh);
	return ret;

fail_free_drop:
	dquot_free_inode(inode);

fail_drop:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	unlock_new_inode(inode);
	iput(inode);
	brelse(bitmap_bh);
	return ERR_PTR(err);
}
Example #17
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext3_free_inode (handle_t *handle, struct inode * inode)
{
	struct super_block * sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	unsigned long block_group;
	unsigned long bit;
	struct ext3_group_desc * gdp;
	struct ext3_super_block * es;
	struct ext3_sb_info *sbi;
	int fatal = 0, err;

	if (atomic_read(&inode->i_count) > 1) {
		printk ("ext3_free_inode: inode has count=%d\n",
					atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk ("ext3_free_inode: inode has nlink=%d\n",
			inode->i_nlink);
		return;
	}
	if (!sb) {
		printk("ext3_free_inode: inode on nonexistent device\n");
		return;
	}
	sbi = EXT3_SB(sb);

	ino = inode->i_ino;
	ext3_debug ("freeing inode %lu\n", ino);
	trace_ext3_free_inode(inode);

	is_directory = S_ISDIR(inode->i_mode);

	es = EXT3_SB(sb)->s_es;
	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext3_error (sb, "ext3_free_inode",
			    "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
	bitmap_bh = read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext3_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
					bit, bitmap_bh->b_data))
		ext3_error (sb, "ext3_free_inode",
			      "bit already cleared for inode %lu", ino);
	else {
		gdp = ext3_get_group_desc (sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext3_journal_get_write_access(handle, bh2);
		if (fatal) goto error_return;

		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			le16_add_cpu(&gdp->bg_free_inodes_count, 1);
			if (is_directory)
				le16_add_cpu(&gdp->bg_used_dirs_count, -1);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);

		}
		BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, bh2);
		if (!fatal) fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
	if (!fatal)
		fatal = err;

error_return:
	brelse(bitmap_bh);
	ext3_std_error(sb, fatal);
}
Example #18
/**
 * ext4_add_groupblocks() -- Add given blocks to an existing group
 * @handle:			handle to this transaction
 * @sb:				super block
 * @block:			start physical block to add to the block group
 * @count:			number of blocks to free
 *
 * This marks the blocks as free in the bitmap. We ask the
 * mballoc to reload the buddy after this by setting group
 * EXT4_GROUP_INFO_NEED_INIT_BIT flag
 */
void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
			 ext4_fsblk_t block, unsigned long count)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
	ext4_group_t block_group;
	ext4_grpblk_t bit;
	unsigned long i;
	struct ext4_group_desc *desc;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int err = 0, ret;
	ext4_grpblk_t blocks_freed;
	struct ext4_group_info *grp;

	sbi = EXT4_SB(sb);
	es = sbi->s_es;
	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);

	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
	grp = ext4_get_group_info(sb, block_group);
	/*
	 * Check that the blocks being added do not cross a block
	 * group boundary.
	 */
	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb))
		goto error_return;
	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;
	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
	if (!desc)
		goto error_return;

	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
	    in_range(block + count - 1, ext4_inode_table(sb, desc),
		     sbi->s_itb_per_group)) {
		ext4_error(sb, __func__,
			   "Adding blocks in system zones - "
			   "Block = %llu, count = %lu",
			   block, count);
		goto error_return;
	}

	/*
	 * We are about to add blocks to the bitmap,
	 * so we need undo access.
	 */
	BUFFER_TRACE(bitmap_bh, "getting undo access");
	err = ext4_journal_get_undo_access(handle, bitmap_bh);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata.  Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gd_bh);
	if (err)
		goto error_return;
	/*
	 * make sure we don't allow a parallel init on other groups in the
	 * same buddy cache
	 */
	down_write(&grp->alloc_sem);
	for (i = 0, blocks_freed = 0; i < count; i++) {
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
						bit + i, bitmap_bh->b_data)) {
			ext4_error(sb, __func__,
				   "bit already cleared for block %llu",
				   (ext4_fsblk_t)(block + i));
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			blocks_freed++;
		}
	}
	spin_lock(sb_bgl_lock(sbi, block_group));
	le16_add_cpu(&desc->bg_free_blocks_count, blocks_freed);
	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
	spin_unlock(sb_bgl_lock(sbi, block_group));
	percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
		spin_lock(sb_bgl_lock(sbi, flex_group));
		sbi->s_flex_groups[flex_group].free_blocks += blocks_freed;
		spin_unlock(sb_bgl_lock(sbi, flex_group));
	}
	/*
	 * request to reload the buddy with the
	 * new bitmap information
	 */
	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
	ext4_mb_update_group_info(grp, blocks_freed);
	up_write(&grp->alloc_sem);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_journal_dirty_metadata(handle, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_journal_dirty_metadata(handle, gd_bh);
	if (!err)
		err = ret;
	sb->s_dirt = 1;

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return;
}
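Every example on this page mutates an on-disk little-endian counter via
le16_add_cpu(), as with bg_free_blocks_count above. A standalone model
of that helper's decode/add/re-encode round trip (an illustrative
sketch, not the kernel implementation) looks like this:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t sketch_le16;	/* stand-in for the kernel's __le16 */

static uint16_t sketch_le16_to_cpu(sketch_le16 v)
{
	const uint8_t *p = (const uint8_t *)&v;

	return (uint16_t)(p[0] | (p[1] << 8));	/* bytes stored low-first */
}

static sketch_le16 sketch_cpu_to_le16(uint16_t v)
{
	sketch_le16 out;
	uint8_t *p = (uint8_t *)&out;

	p[0] = v & 0xff;
	p[1] = v >> 8;
	return out;
}

static void sketch_le16_add_cpu(sketch_le16 *var, int16_t val)
{
	/* decode to CPU order, add, re-encode; mirrors le16_add_cpu() */
	*var = sketch_cpu_to_le16(sketch_le16_to_cpu(*var) + val);
}

int main(void)
{
	sketch_le16 free_count = sketch_cpu_to_le16(100);

	sketch_le16_add_cpu(&free_count, 8);	/* e.g. blocks_freed == 8 */
	printf("%u\n", sketch_le16_to_cpu(free_count));	/* prints 108 */
	return 0;
}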
Exemple #19
0
static int ocfs2_update_last_group_and_inode(handle_t *handle,
					     struct inode *bm_inode,
					     struct buffer_head *bm_bh,
					     struct buffer_head *group_bh,
					     u32 first_new_cluster,
					     int new_clusters)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(bm_inode->i_sb);
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bm_bh->b_data;
	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
	struct ocfs2_chain_rec *cr;
	struct ocfs2_group_desc *group;
	u16 chain, num_bits, backups = 0;
	u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
	u16 cl_cpg = le16_to_cpu(cl->cl_cpg);

	trace_ocfs2_update_last_group_and_inode(new_clusters,
						first_new_cluster);

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;

	/* update the group first. */
	num_bits = new_clusters * cl_bpc;
	le16_add_cpu(&group->bg_bits, num_bits);
	le16_add_cpu(&group->bg_free_bits_count, num_bits);

	/*
	 * Check whether any new backup superblocks exist in this group
	 * and update the group bitmap accordingly.
	 */
	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb,
				     OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
		backups = ocfs2_calc_new_backup_super(bm_inode,
						     group,
						     new_clusters,
						     first_new_cluster,
						     cl_cpg, 1);
		le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
	}

	ocfs2_journal_dirty(handle, group_bh);

	/* update the inode accordingly. */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode), bm_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_rollback;
	}

	chain = le16_to_cpu(group->bg_chain);
	cr = &cl->cl_recs[chain];
	le32_add_cpu(&cr->c_total, num_bits);
	le32_add_cpu(&cr->c_free, num_bits);
	le32_add_cpu(&fe->id1.bitmap1.i_total, num_bits);
	le32_add_cpu(&fe->i_clusters, new_clusters);

	if (backups) {
		le32_add_cpu(&cr->c_free, -1 * backups);
		le32_add_cpu(&fe->id1.bitmap1.i_used, backups);
	}

	spin_lock(&OCFS2_I(bm_inode)->ip_lock);
	OCFS2_I(bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, new_clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(bm_inode)->ip_lock);
	i_size_write(bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_journal_dirty(handle, bm_bh);

out_rollback:
	if (ret < 0) {
		ocfs2_calc_new_backup_super(bm_inode,
					    group,
					    new_clusters,
					    first_new_cluster,
					    cl_cpg, 0);
		le16_add_cpu(&group->bg_free_bits_count, backups);
		le16_add_cpu(&group->bg_bits, -1 * num_bits);
		le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
	}
out:
	if (ret)
		mlog_errno(ret);
	return ret;
}
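The num_bits and backups accounting above can be checked with plain
arithmetic. A small sketch with hypothetical geometry (in the real code
cl_bpc comes from the chain list and the backup count from
ocfs2_calc_new_backup_super()):

#include <stdio.h>

int main(void)
{
	unsigned int new_clusters = 4;	/* hypothetical resize delta */
	unsigned int cl_bpc = 16;	/* hypothetical bits per cluster */
	unsigned int backups = 1;	/* hypothetical backup supers hit */
	unsigned int num_bits = new_clusters * cl_bpc;	/* 64 */

	/* bg_bits grows by all the new bits; bits landing on backup
	 * superblocks are marked used, so the free count grows by less */
	printf("bg_bits += %u, bg_free_bits_count += %u\n",
	       num_bits, num_bits - backups);
	return 0;
}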
Exemple #20
0
Fichier : ie.c Projet : 7799/linux
/* Copy the individual custom IEs for beacon, probe response and assoc
 * response into a single structure for IE setting.
 * This function also updates the allocated IE indices from the driver.
 */
static int
mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
			     struct mwifiex_ie *beacon_ie, u16 *beacon_idx,
			     struct mwifiex_ie *pr_ie, u16 *probe_idx,
			     struct mwifiex_ie *ar_ie, u16 *assoc_idx)
{
	struct mwifiex_ie_list *ap_custom_ie;
	u8 *pos;
	u16 len;
	int ret;

	ap_custom_ie = kzalloc(sizeof(*ap_custom_ie), GFP_KERNEL);
	if (!ap_custom_ie)
		return -ENOMEM;

	ap_custom_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
	pos = (u8 *)ap_custom_ie->ie_list;

	if (beacon_ie) {
		len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
		      le16_to_cpu(beacon_ie->ie_length);
		memcpy(pos, beacon_ie, len);
		pos += len;
		le16_add_cpu(&ap_custom_ie->len, len);
	}
	if (pr_ie) {
		len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
		      le16_to_cpu(pr_ie->ie_length);
		memcpy(pos, pr_ie, len);
		pos += len;
		le16_add_cpu(&ap_custom_ie->len, len);
	}
	if (ar_ie) {
		len = sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE +
		      le16_to_cpu(ar_ie->ie_length);
		memcpy(pos, ar_ie, len);
		pos += len;
		le16_add_cpu(&ap_custom_ie->len, len);
	}

	ret = mwifiex_update_autoindex_ies(priv, ap_custom_ie);

	pos = (u8 *)(&ap_custom_ie->ie_list[0].ie_index);
	if (beacon_ie && *beacon_idx == MWIFIEX_AUTO_IDX_MASK) {
		/* save beacon ie index after auto-indexing */
		*beacon_idx = le16_to_cpu(ap_custom_ie->ie_list[0].ie_index);
		len = sizeof(*beacon_ie) - IEEE_MAX_IE_SIZE +
		      le16_to_cpu(beacon_ie->ie_length);
		pos += len;
	}
	if (pr_ie && le16_to_cpu(pr_ie->ie_index) == MWIFIEX_AUTO_IDX_MASK) {
		/* save probe resp ie index after auto-indexing */
		*probe_idx = le16_to_cpu(*((__le16 *)pos));
		len = sizeof(*pr_ie) - IEEE_MAX_IE_SIZE +
		      le16_to_cpu(pr_ie->ie_length);
		pos += len;
	}
	if (ar_ie && le16_to_cpu(ar_ie->ie_index) == MWIFIEX_AUTO_IDX_MASK)
		/* save assoc resp ie index after auto-indexing */
		*assoc_idx = le16_to_cpu(*((__le16 *)pos));

	kfree(ap_custom_ie);
	return ret;
}
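Each IE copied above occupies sizeof(struct mwifiex_ie) -
IEEE_MAX_IE_SIZE + ie_length bytes, so the writer and the later
index-reading walk must advance pos by the same per-record stride. A
simplified sketch of that packing, using a hypothetical record type in
place of struct mwifiex_ie:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_PAYLOAD 8	/* stand-in for IEEE_MAX_IE_SIZE */

struct record {		/* simplified stand-in for struct mwifiex_ie */
	uint16_t index;
	uint16_t length;	/* bytes of payload actually used */
	uint8_t payload[MAX_PAYLOAD];
};

int main(void)
{
	uint8_t buf[64];
	uint8_t *pos = buf;
	struct record r = { .index = 0, .length = 3,
			    .payload = { 0xaa, 0xbb, 0xcc } };

	/* copy the header plus only the used payload, mirroring the
	 * len = sizeof(*ie) - IEEE_MAX_IE_SIZE + ie_length computation */
	size_t len = sizeof(r) - MAX_PAYLOAD + r.length;

	memcpy(pos, &r, len);
	pos += len;
	printf("packed %zu bytes; next record at offset %td\n",
	       len, pos - buf);
	return 0;
}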