/*
 * Give one page-sized block back to the superblock's free count and
 * drop the inode's block usage accordingly.
 */
static void shmem_free_block(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	spin_lock(&sbinfo->stat_lock);
	sbinfo->free_blocks++;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&sbinfo->stat_lock);
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist, allocate the entry.
 *
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 */
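/*
 * Illustrative caller sketch (not part of this file): the expected
 * pattern, roughly as in shmem_getpage(), is to call with info->lock
 * held and to check for an ERR_PTR() return before using the entry:
 *
 *	spin_lock(&info->lock);
 *	entry = shmem_swp_alloc(info, index, sgp);
 *	if (IS_ERR(entry)) {
 *		error = PTR_ERR(entry);
 *		spin_unlock(&info->lock);
 *		goto failed;
 *	}
 *
 * Note that shmem_swp_alloc() may drop and retake info->lock while it
 * allocates an index page, so state observed before the call cannot be
 * relied upon afterwards.
 */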
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = info->inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	unsigned long page = 0;
	swp_entry_t *entry;
	static const swp_entry_t unswapped = {0};

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= inode->i_size)
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return (swp_entry_t *) &unswapped;
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
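		/*
		 * Concretely (illustrative): with free_blocks == 1 we could
		 * charge the index page here, but the caller would then fail
		 * to account the data page itself and the index allocation
		 * would have been wasted.
		 */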
		spin_lock(&sbinfo->stat_lock);
		if (sbinfo->free_blocks <= 1) {
			spin_unlock(&sbinfo->stat_lock);
			return ERR_PTR(-ENOSPC);
		}
		sbinfo->free_blocks--;
		inode->i_blocks += BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);

		spin_unlock(&info->lock);
		page = get_zeroed_page(GFP_USER);
		spin_lock(&info->lock);
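		/*
		 * info->lock was dropped across the allocation above, so
		 * another task may have installed the index page for us, or
		 * the file may have been truncated meanwhile: the rechecks
		 * below and the retry of shmem_swp_entry() handle both, and
		 * any page we no longer need is released after the loop.
		 */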

		if (!page) {
			shmem_free_block(inode);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= inode->i_size) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_block(inode);
		free_page(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}
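
/*
 * Remount a tmpfs instance: re-parse the mount options and update the
 * block/inode limits and memory policy under sbinfo->stat_lock.  A limit
 * cannot be lowered below current usage, and an unlimited instance
 * cannot be converted to a limited one (see the comment below).
 */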
static int CVE_2013_1767_linux2_6_23_shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	unsigned long max_blocks = sbinfo->max_blocks;
	unsigned long max_inodes = sbinfo->max_inodes;
	int policy = sbinfo->policy;
	nodemask_t policy_nodes = sbinfo->policy_nodes;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
				&max_inodes, &policy, &policy_nodes))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (max_blocks < blocks)
		goto out;
	if (max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
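	/*
	 * For example (illustrative): an instance mounted without a size
	 * limit (size=0) cannot later be remounted with a finite size such
	 * as size=10M, because no block usage was tracked while it was
	 * unlimited.
	 */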
	if (max_blocks && !sbinfo->max_blocks)
		goto out;
	if (max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = max_blocks;
	sbinfo->free_blocks = max_blocks - blocks;
	sbinfo->max_inodes  = max_inodes;
	sbinfo->free_inodes = max_inodes - inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
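
/*
 * Truncate the inode: free the swap entries and index pages beyond the
 * new i_size, then return the freed blocks to the superblock counters.
 * The page cache itself has normally been truncated by the caller
 * already; it is re-truncated below only to close races with swap-in.
 */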
static void shmem_truncate(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	unsigned long freed = 0;
	unsigned long index;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (index >= info->next_index)
		return;

	spin_lock(&info->lock);
	while (index < info->next_index)
		freed += shmem_truncate_indirect(info, index);
	BUG_ON(info->swapped > info->next_index);

	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 */
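		/*
		 * truncate_inode_pages() can sleep, so info->lock (a
		 * spinlock) is dropped around the call; SHMEM_TRUNCATE
		 * marks the inode as mid-truncation for that window.
		 */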
		info->flags |= SHMEM_TRUNCATE;
		spin_unlock(&info->lock);
		truncate_inode_pages(inode->i_mapping, inode->i_size);
		spin_lock(&info->lock);
		info->flags &= ~SHMEM_TRUNCATE;
	}

	spin_unlock(&info->lock);
	spin_lock(&sbinfo->stat_lock);
	sbinfo->free_blocks += freed;
	inode->i_blocks -= freed*BLOCKS_PER_PAGE;
	spin_unlock(&sbinfo->stat_lock);
}