static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_CACHE_SIZE);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
	       ip->i_di.di_size);
	memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}
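gfs2's stuffed_readpage() copies the inline ("stuffed") file data out of the inode's disk block into page 0 and zero-fills the remainder; any higher page index can only be asked for while the file is being unstuffed, so a fully zeroed page is supplied.

All of these examples lean on zero_user() to clear a byte range inside a page while handling highmem mapping and cache flushing. For reference, the definition in include/linux/highmem.h of this era looks roughly like the sketch below (reconstructed from memory, not copied from a specific tree; details such as the BUG_ON vary between kernel versions):

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	/* Map the (possibly highmem) page, zero one or two ranges, unmap. */
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);
	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	/* Zero 'size' bytes of 'page' starting at byte offset 'start'. */
	zero_user_segments(page, start, start + size, 0, 0);
}
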
Example #2
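9p's ->write_end: after a short copy from userspace, zero_user() clears the uncopied tail so the page can safely be marked up to date, and i_size is extended under i_mutex when the write ran past EOF.
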
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
        loff_t last_pos = pos + copied;
        struct inode *inode = page->mapping->host;

        //p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

        if (unlikely(copied < len)) {
                /*
                 * zero out the rest of the area
                 */
                unsigned from = pos & (PAGE_CACHE_SIZE - 1);

                zero_user(page, from + copied, len - copied);
                flush_dcache_page(page);
        }

        if (!PageUptodate(page))
                SetPageUptodate(page);
        /*
         * No need to use i_size_read() here, the i_size
         * cannot change under us because we hold the i_mutex.
         */
        if (last_pos > inode->i_size) {
                inode_add_bytes(inode, last_pos - inode->i_size);
                i_size_write(inode, last_pos);
        }
        set_page_dirty(page);
        unlock_page(page);
        page_cache_release(page);

        return copied;
}
Example #3
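f2fs, truncating the partial page at a new EOF: the tail of the page from the truncation offset onwards is zeroed, and the page is dirtied unless it is a cache-only page of an encrypted regular file.
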
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = grab_cache_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index);
	if (IS_ERR(page))
		return 0;
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
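Example #4

ocfs2's ->readpage (older variant, with mlog instrumentation): once the cluster lock is held, a page that now starts at or beyond i_size, e.g. after a truncate performed on another node, is zero-filled and marked up to date instead of being read from disk.
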
static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ret = AOP_TRUNCATED_PAGE;
		goto out_inode_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock.  We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here.  Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, page);
	else
		ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_inode_unlock:
	ocfs2_inode_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	mlog_exit(ret);
	return ret;
}
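Example #5

The same ocfs2_readpage() from a later kernel: the mlog calls have become a tracepoint, and a failed down_read_trylock() now unlocks the page and takes/releases ip_alloc_sem to wait for the current holder before returning AOP_TRUNCATED_PAGE.
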
static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
			     (page ? page->index : 0));

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		ret = AOP_TRUNCATED_PAGE;
		unlock_page(page);
		unlock = 0;
		down_read(&oi->ip_alloc_sem);
		up_read(&oi->ip_alloc_sem);
		goto out_inode_unlock;
	}

	if (start >= i_size_read(inode)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, page);
	else
		ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_inode_unlock:
	ocfs2_inode_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	return ret;
}
Example #6
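xfs_iozero() with DAX support: on a DAX inode the range is zeroed directly in the backing store via dax_zero_page_range(); otherwise each page-sized chunk is zeroed through the generic pagecache_write_begin()/zero_user()/pagecache_write_end() sequence.
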
/*
 * xfs_iozero clears the specified range supplied via the page cache (except in
 * the DAX case). Writes through the page cache will allocate blocks over holes,
 * though the callers usually map the holes first and avoid them. If a block is
 * not completely zeroed, then it will be read from disk before being partially
 * zeroed.
 *
 * In the DAX case, we can just directly write to the underlying pages. This
 * will not allocate blocks, but will avoid holes and unwritten extents and so
 * not do unnecessary work.
 */
int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status = 0;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		if (IS_DAX(VFS_I(ip))) {
			status = dax_zero_page_range(VFS_I(ip), pos, bytes,
						     xfs_get_blocks_direct);
			if (status)
				break;
		} else {
			status = pagecache_write_begin(NULL, mapping, pos, bytes,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
			if (status)
				break;

			zero_user(page, offset, bytes);

			status = pagecache_write_end(NULL, mapping, pos, bytes,
						bytes, page, fsdata);
			WARN_ON(status <= 0); /* can't return less than zero! */
			status = 0;
		}
		pos += bytes;
		count -= bytes;
	} while (count);

	return status;
}
Example #7
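f2fs's fill_zero() grabs (or creates) the data page covering the range under the op lock, waits for any writeback on it, then zeroes the requested bytes and dirties the page.
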
static void fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return;

	f2fs_balance_fs(sbi);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (!IS_ERR(page)) {
		f2fs_wait_on_page_writeback(page, DATA);
		zero_user(page, start, len);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}
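Example #8

The classic page-cache-only xfs_iozero(). Note the final return (-status): XFS of this era used positive error numbers internally, so the negative errno left behind by the page-cache helpers is negated on the way out.
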
/*
 *	xfs_iozero
 *
 *	xfs_iozero clears the specified range of buffer supplied,
 *	and marks all the affected blocks as valid and modified.  If
 *	an affected block is not allocated, it will be allocated.  If
 *	an affected block is not completely overwritten, and is not
 *	valid before the operation, it will be read from disk before
 *	being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode	*ip,
	loff_t			pos,
	size_t			count)
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1));
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					       AOP_FLAG_UNINTERRUPTIBLE,
					       &page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					     page, fsdata);
		WARN_ON(status <= 0);
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}
Example #9
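An even older xfs_iozero(), identical in structure to the previous example; the only real difference is that the VFS inode is reached through ip->i_vnode rather than the VFS_I() accessor.
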
/*
 *	xfs_iozero
 *
 *	xfs_iozero clears the specified range of buffer supplied,
 *	and marks all the affected blocks as valid and modified.  If
 *	an affected block is not allocated, it will be allocated.  If
 *	an affected block is not completely overwritten, and is not
 *	valid before the operation, it will be read from disk before
 *	being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = ip->i_vnode->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}
Example #10
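The earlier f2fs truncate_partial_data_page(), before the cache_only logic: a page-aligned truncation point needs no work; otherwise the existing data page is revalidated under its lock, its tail zeroed, and the page dirtied.
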
static int truncate_partial_data_page(struct inode *inode, u64 from)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	if (!offset)
		return 0;

	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
	if (IS_ERR(page))
		return 0;

	lock_page(page);
	if (unlikely(!PageUptodate(page) ||
			page->mapping != inode->i_mapping))
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	set_page_dirty(page);
out:
	f2fs_put_page(page, 1);
	return 0;
}
Example #11
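eCryptfs ->readpage and ->write_begin. Reads fill the page from the lower file: plain, with the metadata header copied up from the xattr, or decrypted (optionally offloaded to an async request when CONFIG_CRYPTO_DEV_KFIPS is set). In write_begin, a page that starts at or beyond i_size is fully zeroed with zero_user() rather than decrypted, so newly created holes never expose stale data.
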
/**
 * ecryptfs_readpage
 * @file: An eCryptfs file
 * @page: Page from eCryptfs inode mapping into which to stick the read data
 *
 * Read in a page, decrypting if necessary.
 *
 * Returns zero on success; non-zero on error.
 */
static int ecryptfs_readpage(struct file *file, struct page *page)
{
	struct ecryptfs_crypt_stat *crypt_stat =
		&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
#ifdef CONFIG_CRYPTO_DEV_KFIPS
	struct ecryptfs_page_crypt_req *page_crypt_req = NULL;
#endif
	int rc = 0;

	if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
		rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
						      PAGE_CACHE_SIZE,
						      page->mapping->host);
	} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
		if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
			rc = ecryptfs_copy_up_encrypted_with_header(page,
								    crypt_stat);
			if (rc) {
				printk(KERN_ERR "%s: Error attempting to copy "
				       "the encrypted content from the lower "
				       "file whilst inserting the metadata "
				       "from the xattr into the header; rc = "
				       "[%d]\n", __func__, rc);
				goto out;
			}

		} else {
			rc = ecryptfs_read_lower_page_segment(
				page, page->index, 0, PAGE_CACHE_SIZE,
				page->mapping->host);
			if (rc) {
				printk(KERN_ERR "Error reading page; rc = "
				       "[%d]\n", rc);
				goto out;
			}
		}
	} else {
#ifndef CONFIG_CRYPTO_DEV_KFIPS
		rc = ecryptfs_decrypt_page(page);
		if (rc) {
			ecryptfs_printk(KERN_ERR, "Error decrypting page; "
					"rc = [%d]\n", rc);
#else
		page_crypt_req = ecryptfs_alloc_page_crypt_req(
					page, ecryptfs_readpage_complete);
		if (!page_crypt_req) {
			rc = -ENOMEM;
			ecryptfs_printk(KERN_ERR,
					"Failed to allocate page crypt request "
					"for decryption\n");
#endif
			goto out;
		}
#ifdef CONFIG_CRYPTO_DEV_KFIPS
		ecryptfs_decrypt_page_async(page_crypt_req);
		goto out_async_started;
#endif
	}
out:
#ifndef CONFIG_CRYPTO_DEV_KFIPS
	if (rc)
#else
	if (unlikely(rc))
#endif
		ClearPageUptodate(page);
	else
		SetPageUptodate(page);
	ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16lx]\n",
			page->index);
	unlock_page(page);
#ifdef CONFIG_CRYPTO_DEV_KFIPS
out_async_started:
#endif
	return rc;
}

/**
 * Called with lower inode mutex held.
 */
static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
{
	struct inode *inode = page->mapping->host;
	int end_byte_in_page;

	if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index)
		goto out;
	end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
	if (to > end_byte_in_page)
		end_byte_in_page = to;
	zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);
out:
	return 0;
}

/**
 * ecryptfs_write_begin
 * @file: The eCryptfs file
 * @mapping: The eCryptfs object
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused)
 *
 * This function must zero any hole we create
 *
 * Returns zero on success; non-zero otherwise
 */
static int ecryptfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct page *page;
	loff_t prev_page_end_size;
	int rc = 0;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	prev_page_end_size = ((loff_t)index << PAGE_CACHE_SHIFT);
	if (!PageUptodate(page)) {
		struct ecryptfs_crypt_stat *crypt_stat =
			&ecryptfs_inode_to_private(mapping->host)->crypt_stat;

		if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
			rc = ecryptfs_read_lower_page_segment(
				page, index, 0, PAGE_CACHE_SIZE, mapping->host);
			if (rc) {
				printk(KERN_ERR "%s: Error attempting to read "
				       "lower page segment; rc = [%d]\n",
				       __func__, rc);
				ClearPageUptodate(page);
				goto out;
			} else
				SetPageUptodate(page);
		} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
			if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
				rc = ecryptfs_copy_up_encrypted_with_header(
					page, crypt_stat);
				if (rc) {
					printk(KERN_ERR "%s: Error attempting "
					       "to copy the encrypted content "
					       "from the lower file whilst "
					       "inserting the metadata from "
					       "the xattr into the header; rc "
					       "= [%d]\n", __func__, rc);
					ClearPageUptodate(page);
					goto out;
				}
				SetPageUptodate(page);
			} else {
				rc = ecryptfs_read_lower_page_segment(
					page, index, 0, PAGE_CACHE_SIZE,
					mapping->host);
				if (rc) {
					printk(KERN_ERR "%s: Error reading "
					       "page; rc = [%d]\n",
					       __func__, rc);
					ClearPageUptodate(page);
					goto out;
				}
				SetPageUptodate(page);
			}
		} else {
			if (prev_page_end_size
			    >= i_size_read(page->mapping->host)) {
				zero_user(page, 0, PAGE_CACHE_SIZE);
			} else {
				rc = ecryptfs_decrypt_page(page);
				if (rc) {
					printk(KERN_ERR "%s: Error decrypting "
					       "page at index [%ld]; "
					       "rc = [%d]\n",
					       __func__, page->index, rc);
					ClearPageUptodate(page);
					goto out;
				}
			}
			SetPageUptodate(page);
		}
	}
	/* If creating a page or more of holes, zero them out via truncate.
	 * Note, this will increase i_size. */
	if (index != 0) {
		if (prev_page_end_size > i_size_read(page->mapping->host)) {
			rc = ecryptfs_truncate(file->f_path.dentry,
					       prev_page_end_size);
			if (rc) {
				printk(KERN_ERR "%s: Error on attempt to "
				       "truncate to (higher) offset [%lld];"
				       " rc = [%d]\n", __func__,
				       prev_page_end_size, rc);
				goto out;
			}
		}
	}
	/* Writing to a new page, and creating a small hole from start
	 * of page?  Zero it out. */
	if ((i_size_read(mapping->host) == prev_page_end_size)
	    && (pos != 0))
		zero_user(page, 0, PAGE_CACHE_SIZE);
out:
	if (unlikely(rc)) {
		unlock_page(page);
		page_cache_release(page);
		*pagep = NULL;
	}
	return rc;
}
Example #12
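The older prepare_write flavour of the previous example: the Uptodate and hole-zeroing logic matches ecryptfs_write_begin(), keyed on the page-relative from offset instead of pos.
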
/**
 * ecryptfs_prepare_write
 * @file: The eCryptfs file
 * @page: The eCryptfs page
 * @from: The start byte from which we will write
 * @to: The end byte to which we will write
 *
 * This function must zero any hole we create
 *
 * Returns zero on success; non-zero otherwise
 */
static int ecryptfs_prepare_write(struct file *file, struct page *page,
				  unsigned from, unsigned to)
{
	loff_t prev_page_end_size;
	int rc = 0;

	if (!PageUptodate(page)) {
		struct ecryptfs_crypt_stat *crypt_stat =
			&ecryptfs_inode_to_private(
				file->f_dentry->d_inode)->crypt_stat;

		if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)
		    || (crypt_stat->flags & ECRYPTFS_NEW_FILE)) {
			rc = ecryptfs_read_lower_page_segment(
				page, page->index, 0, PAGE_CACHE_SIZE,
				page->mapping->host);
			if (rc) {
				printk(KERN_ERR "%s: Error attempting to read "
				       "lower page segment; rc = [%d]\n",
				       __func__, rc);
				ClearPageUptodate(page);
				goto out;
			} else
				SetPageUptodate(page);
		} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
			if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
				rc = ecryptfs_copy_up_encrypted_with_header(
					page, crypt_stat);
				if (rc) {
					printk(KERN_ERR "%s: Error attempting "
					       "to copy the encrypted content "
					       "from the lower file whilst "
					       "inserting the metadata from "
					       "the xattr into the header; rc "
					       "= [%d]\n", __func__, rc);
					ClearPageUptodate(page);
					goto out;
				}
				SetPageUptodate(page);
			} else {
				rc = ecryptfs_read_lower_page_segment(
					page, page->index, 0, PAGE_CACHE_SIZE,
					page->mapping->host);
				if (rc) {
					printk(KERN_ERR "%s: Error reading "
					       "page; rc = [%d]\n",
					       __func__, rc);
					ClearPageUptodate(page);
					goto out;
				}
				SetPageUptodate(page);
			}
		} else {
			rc = ecryptfs_decrypt_page(page);
			if (rc) {
				printk(KERN_ERR "%s: Error decrypting page "
				       "at index [%ld]; rc = [%d]\n",
				       __func__, page->index, rc);
				ClearPageUptodate(page);
				goto out;
			}
			SetPageUptodate(page);
		}
	}
	prev_page_end_size = ((loff_t)page->index << PAGE_CACHE_SHIFT);
	/* If creating a page or more of holes, zero them out via truncate.
	 * Note, this will increase i_size. */
	if (page->index != 0) {
		if (prev_page_end_size > i_size_read(page->mapping->host)) {
			rc = ecryptfs_truncate(file->f_dentry,
					       prev_page_end_size);
			if (rc) {
				printk(KERN_ERR "%s: Error on attempt to "
				       "truncate to (higher) offset [%lld];"
				       " rc = [%d]\n", __func__,
				       prev_page_end_size, rc);
				goto out;
			}
		}
	}
	/* Writing to a new page, and creating a small hole from start
	 * of page?  Zero it out. */
	if ((i_size_read(page->mapping->host) == prev_page_end_size)
	    && (from != 0))
		zero_user(page, 0, PAGE_CACHE_SIZE);
out:
	return rc;
}