Code example #1
0
/*
 * shmem_notify_change - ->setattr preparation for tmpfs inodes.
 * NOTE(review): this excerpt is truncated — the function continues past
 * the last visible line (error handling / closing brace not shown).
 * The visible part prepares a size-reducing truncate: it pins any
 * partial final page and clears SHMEM_PAGEIN so shmem_truncate can
 * detect pages re-added to the cache during the race window.
 */
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;	/* pins the partial last page, if any */
	int error;	/* presumably set/returned later in this function — not shown here */

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}
Code example #2
0
/**
 * shmem_get_acl  -   generic_acl_operations->getacl() operation
 */
static struct posix_acl *
shmem_get_acl(struct inode *inode, int type)
{
	struct posix_acl *acl = NULL;

	spin_lock(&inode->i_lock);
	switch(type) {
		case ACL_TYPE_ACCESS:
			acl = posix_acl_dup(SHMEM_I(inode)->i_acl);
			break;

		case ACL_TYPE_DEFAULT:
			acl = posix_acl_dup(SHMEM_I(inode)->i_default_acl);
			break;
	}
	spin_unlock(&inode->i_lock);

	return acl;
}
Code example #3
0
/**
 * shmem_set_acl  -   generic_acl_operations->setacl() operation
 */
static void
shmem_set_acl(struct inode *inode, int type, struct posix_acl *acl)
{
	struct posix_acl *free = NULL;

	spin_lock(&inode->i_lock);
	switch(type) {
		case ACL_TYPE_ACCESS:
			free = SHMEM_I(inode)->i_acl;
			SHMEM_I(inode)->i_acl = posix_acl_dup(acl);
			break;

		case ACL_TYPE_DEFAULT:
			free = SHMEM_I(inode)->i_default_acl;
			SHMEM_I(inode)->i_default_acl = posix_acl_dup(acl);
			break;
	}
	spin_unlock(&inode->i_lock);
	posix_acl_release(free);
}
Code example #4
0
void
shmem_acl_destroy_inode(struct inode *inode)
{
	if (SHMEM_I(inode)->i_acl)
		posix_acl_release(SHMEM_I(inode)->i_acl);
	SHMEM_I(inode)->i_acl = NULL;
	if (SHMEM_I(inode)->i_default_acl)
		posix_acl_release(SHMEM_I(inode)->i_default_acl);
	SHMEM_I(inode)->i_default_acl = NULL;
}
Code example #5
0
/*
 * shmem_truncate - release pages (and their swap) beyond the new i_size.
 *
 * Walks the inode's indirect blocks from the first index past i_size up
 * to next_index, freeing as it goes, then re-runs truncate_inode_pages
 * if pages may have raced back into the cache.  Finally returns the
 * freed blocks to the superblock's free count.
 *
 * Locking: info->lock covers the index walk and the SHMEM_TRUNCATE flag
 * window; it is deliberately dropped around truncate_inode_pages (which
 * may sleep) and retaken afterwards.  Do not reorder these steps.
 */
static void shmem_truncate(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	unsigned long freed = 0;	/* pages released, for block accounting below */
	unsigned long index;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	/* First page index wholly beyond the new size (round size up). */
	index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (index >= info->next_index)
		return;		/* nothing allocated past the new size */

	spin_lock(&info->lock);
	/* shmem_truncate_indirect lowers next_index as it frees — TODO confirm */
	while (index < info->next_index)
		freed += shmem_truncate_indirect(info, index);
	BUG_ON(info->swapped > info->next_index);

	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 */
		info->flags |= SHMEM_TRUNCATE;
		/* must drop the spinlock: truncate_inode_pages can sleep */
		spin_unlock(&info->lock);
		truncate_inode_pages(inode->i_mapping, inode->i_size);
		spin_lock(&info->lock);
		info->flags &= ~SHMEM_TRUNCATE;
	}

	spin_unlock(&info->lock);
	/* Return the freed pages to the filesystem-wide accounting. */
	spin_lock(&sbinfo->stat_lock);
	sbinfo->free_blocks += freed;
	inode->i_blocks -= freed*BLOCKS_PER_PAGE;
	spin_unlock(&sbinfo->stat_lock);
}