Example #1
/**
 * invalidate_inode_pages2 - remove all unmapped pages from an address_space
 * @mapping: the address_space
 *
 * invalidate_inode_pages2() is like truncate_inode_pages(), except for the case
 * where the page is seen to be mapped into process pagetables.  In that case,
 * the page is marked clean but is left attached to its address_space.
 *
 * The page is also marked not uptodate so that a subsequent pagefault will
 * perform I/O to bring the page's contents back into sync with its backing
 * store.
 *
 * FIXME: invalidate_inode_pages2() is probably trivially livelockable.
 */
void invalidate_inode_pages2(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (page->mapping == mapping) {	/* truncate race? */
				wait_on_page_writeback(page);
				next = page->index + 1;
				if (page_mapped(page)) {
					clear_page_dirty(page);
					ClearPageUptodate(page);
				} else {
					if (!invalidate_complete_page(mapping,
								      page)) {
						clear_page_dirty(page);
						ClearPageUptodate(page);
					}
				}
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
Example #2
/**
 * ecryptfs_writepage
 * @page: Page that is locked before this call is made
 *
 * Returns zero on success; non-zero otherwise
 *
 * This is where we encrypt the data and pass the encrypted data to
 * the lower filesystem.  In OpenPGP-compatible mode, we operate on
 * entire underlying packets.
 */
static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
    int rc;
#if 1 // FEATURE_SDCARD_ENCRYPTION
    struct inode *ecryptfs_inode;
    struct ecryptfs_crypt_stat *crypt_stat =
        &ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
    ecryptfs_inode = page->mapping->host;
#endif

    /*
     * Refuse to write the page out if we are called from reclaim context
     * since our writepage() path may potentially allocate memory when
     * calling into the lower fs vfs_write() which may in turn invoke
     * us again.
     */
    if (current->flags & PF_MEMALLOC) {
        redirty_page_for_writepage(wbc, page);
        rc = 0;
        goto out;
    }

#if 1 // FEATURE_SDCARD_ENCRYPTION
    if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
        ecryptfs_printk(KERN_DEBUG,
                        "Passing through unencrypted page\n");
        rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page,
                                               0, PAGE_CACHE_SIZE);
        if (rc) {
            ClearPageUptodate(page);
            goto out;
        }
        SetPageUptodate(page);
    } else {
        rc = ecryptfs_encrypt_page(page);
        if (rc) {
            ecryptfs_printk(KERN_WARNING, "Error encrypting "
                            "page (upper index [0x%.16lx])\n", page->index);
            ClearPageUptodate(page);
            goto out;
        }
        SetPageUptodate(page);
    }
#else
    rc = ecryptfs_encrypt_page(page);
    if (rc) {
        ecryptfs_printk(KERN_WARNING, "Error encrypting "
                        "page (upper index [0x%.16lx])\n", page->index);
        ClearPageUptodate(page);
        goto out;
    }
    SetPageUptodate(page);
#endif
out:
    unlock_page(page);
    return rc;
}
Example #3
/**
 * ecryptfs_writepage
 * @page: Page that is locked before this call is made
 *
 * Returns zero on success; non-zero otherwise
 *
 * This is where we encrypt the data and pass the encrypted data to
 * the lower filesystem.  In OpenPGP-compatible mode, we operate on
 * entire underlying packets.
 */
static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	/*
	 * Refuse to write the page out if we are called from reclaim context
	 * since our writepage() path may potentially allocate memory when
	 * calling into the lower fs vfs_write() which may in turn invoke
	 * us again.
	 */
	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		rc = 0;
		goto out;
	}

	rc = ecryptfs_encrypt_page(page);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting "
				"page (upper index [0x%.16lx])\n", page->index);
		ClearPageUptodate(page);
		goto out;
	}
	SetPageUptodate(page);
out:
	unlock_page(page);
	return rc;
}
Example #4
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		if (bio_flagged(bio, BIO_BAIO)) {
			struct ba_iocb *baiocb =
				(struct ba_iocb *)bio->bi_private2;
			BUG_ON(!PageBaio(page));
			ClearPageBaio(page);
			if (!uptodate)
				baiocb->io_error = -EIO;
			baiocb->result += bvec->bv_len;
			baiocb_put(baiocb);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
Example #5
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static int mpage_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

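	/*
	 * With the old three-argument bi_end_io signature the handler could be
	 * invoked for partial completions; a non-zero bi_size means part of the
	 * bio is still in flight, so return 1 and wait for the final call.
	 */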
	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
	return 0;
}
Example #6
/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage);
	kaddr1 = kmap_atomic(dpage);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1);
	kunmap_atomic(kaddr0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

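	/*
	 * dpage is marked uptodate/mapped only if every buffer on it
	 * (including the one just copied) has the corresponding bit set.
	 */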
	bh = dbh;
	bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & BIT(BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & BIT(BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}
Example #7
static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
{
    struct super_block *sb = sdp->sd_vfs;
    struct gfs2_sb *p;
    struct page *page;
    struct bio *bio;

    page = alloc_page(GFP_NOFS);
    if (unlikely(!page))
        return -ENOBUFS;

    ClearPageUptodate(page);
    ClearPageDirty(page);
    lock_page(page);

    bio = bio_alloc(GFP_NOFS, 1);
    bio->bi_sector = sector * (sb->s_blocksize >> 9);
    bio->bi_bdev = sb->s_bdev;
    bio_add_page(bio, page, PAGE_SIZE, 0);

    bio->bi_end_io = end_bio_io_page;
    bio->bi_private = page;
    submit_bio(READ_SYNC | REQ_META, bio);
    wait_on_page_locked(page);
    bio_put(bio);
    if (!PageUptodate(page)) {
        __free_page(page);
        return -EIO;
    }
    p = kmap(page);
    gfs2_sb_in(sdp, p);
    kunmap(page);
    __free_page(page);
    return gfs2_check_sb(sdp, silent);
}
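Example #7 hands the page to end_bio_io_page() as bi_end_io and then waits on the page lock; below is a hedged sketch of what such a completion handler looks like for this era's two-argument bi_end_io (the real gfs2 handler may differ in detail):

static void end_bio_io_page(struct bio *bio, int error)
{
	struct page *page = bio->bi_private;

	if (!error)
		SetPageUptodate(page);
	else
		printk(KERN_WARNING "gfs2: error %d reading superblock\n", error);
	unlock_page(page);	/* lets wait_on_page_locked() in the caller proceed */
}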
Example #8
/*
 * It only removes the dentry from the dentry page; the corresponding name
 * entry in the name page does not need to be touched during deletion.
 */
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
						struct inode *inode)
{
	struct	f2fs_dentry_block *dentry_blk;
	unsigned int bit_pos;
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	void *kaddr = page_address(page);
	int i;

	lock_page(page);
	wait_on_page_writeback(page);

	dentry_blk = (struct f2fs_dentry_block *)kaddr;
	bit_pos = dentry - (struct f2fs_dir_entry *)dentry_blk->dentry;
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);

	/* Let's check and deallocate this dentry page */
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
			NR_DENTRY_IN_BLOCK,
			0);
	kunmap(page); /* kunmap - pair of f2fs_find_entry */
	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode && S_ISDIR(inode->i_mode)) {
		drop_nlink(dir);
		update_inode_page(dir);
	} else {
		mark_inode_dirty(dir);
	}

	if (inode) {
		inode->i_ctime = CURRENT_TIME;
		drop_nlink(inode);
		if (S_ISDIR(inode->i_mode)) {
			drop_nlink(inode);
			i_size_write(inode, 0);
		}
		update_inode_page(inode);

		if (inode->i_nlink == 0)
			add_orphan_inode(sbi, inode->i_ino);
		else
			release_orphan_inode(sbi);
	}

	if (bit_pos == NR_DENTRY_IN_BLOCK) {
		truncate_hole(dir, page->index, page->index + 1);
		clear_page_dirty_for_io(page);
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(dir);
	}
	f2fs_put_page(page, 1);
}
Example #9
static void f2fs_read_end_io(struct bio *bio)
{
    struct bio_vec *bvec;
    int i;

    if (f2fs_bio_encrypted(bio)) {
        if (bio->bi_error) {
            f2fs_release_crypto_ctx(bio->bi_private);
        } else {
            f2fs_end_io_crypto_work(bio->bi_private, bio);
            return;
        }
    }

    bio_for_each_segment_all(bvec, bio, i) {
        struct page *page = bvec->bv_page;

        if (!bio->bi_error) {
            SetPageUptodate(page);
        } else {
            ClearPageUptodate(page);
            SetPageError(page);
        }
        unlock_page(page);
    }
    bio_put(bio);
}
Example #10
/**
 * ecryptfs_writepage
 * @page: Page that is locked before this call is made
 *
 * Returns zero on success; non-zero otherwise
 *
 * This is where we encrypt the data and pass the encrypted data to
 * the lower filesystem.  In OpenPGP-compatible mode, we operate on
 * entire underlying packets.
 */
static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	// WTL_EDM_START
	/* MDM 3.1 START */
	struct inode *inode;
	struct ecryptfs_crypt_stat *crypt_stat;

	inode = page->mapping->host;
	crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
	if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
		size_t size;
		loff_t file_size = i_size_read(inode);
		pgoff_t end_page_index = file_size >> PAGE_CACHE_SHIFT;
		if (end_page_index < page->index)
			size = 0;
		else if (end_page_index == page->index)
			size = file_size & ~PAGE_CACHE_MASK;
		else
			size = PAGE_CACHE_SIZE;

		rc = ecryptfs_write_lower_page_segment(inode, page, 0,
						       size);
		if (unlikely(rc)) {
			ecryptfs_printk(KERN_WARNING, "Error writing "
					"page (upper index [0x%.16lx])\n",
					page->index);
			ClearPageUptodate(page);
		} else
			SetPageUptodate(page);
		goto out;
	}

	rc = ecryptfs_encrypt_page(page);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting "
				"page (upper index [0x%.16lx])\n", page->index);
		ClearPageUptodate(page);
		goto out;
	}
	SetPageUptodate(page);
out:
	unlock_page(page);
	return rc;
}
Example #11
/* completion handler for BIO writes */
static int bi_write_complete(struct bio *bio, unsigned int bytes_done, int error)
{
    const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
    struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

    if (bio->bi_size)
        return 1;

    if(!uptodate)
        err("bi_write_complete: not uptodate\n");

    do {
        struct page *page = bvec->bv_page;
        DEBUG(3, "Cleaning up page %ld\n", page->index);
        if (--bvec >= bio->bi_io_vec)
            prefetchw(&bvec->bv_page->flags);

        if (uptodate) {
            SetPageUptodate(page);
        } else {
            ClearPageUptodate(page);
            SetPageError(page);
        }
        ClearPageDirty(page);
        unlock_page(page);
        page_cache_release(page);
    } while (bvec >= bio->bi_io_vec);

    complete((struct completion*)bio->bi_private);
    return 0;
}
Example #12
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (bio_data_dir(bio) == READ) {
			if (uptodate) {
				SetPageUptodate(page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			unlock_page(page);
		} else { /* bio_data_dir(bio) == WRITE */
			if (!uptodate) {
				SetPageError(page);
				if (page->mapping)
					set_bit(AS_EIO, &page->mapping->flags);
			}
			end_page_writeback(page);
		}
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
Example #13
/*
 * Populate a page with data for the Linux page cache.  This function is
 * only used to support mmap(2).  There will be an identical copy of the
 * data in the ARC which is kept up to date via .write() and .writepage().
 *
 * Currently this function relies on zpl_read_common() and the O_DIRECT
 * flag to read in a page.  This works but the more correct way is to
 * update zfs_fillpage() to be Linux friendly and use that interface.
 */
static int
zpl_readpage(struct file *filp, struct page *pp)
{
	struct inode *ip;
	struct page *pl[1];
	int error = 0;

	ASSERT(PageLocked(pp));
	ip = pp->mapping->host;
	pl[0] = pp;

	error = -zfs_getpage(ip, pl, 1);

	if (error) {
		SetPageError(pp);
		ClearPageUptodate(pp);
	} else {
		ClearPageError(pp);
		SetPageUptodate(pp);
		flush_dcache_page(pp);
	}

	unlock_page(pp);
	return (error);
}
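A readpage handler like zpl_readpage() reaches the VFS through the inode's address_space_operations table; the sketch below only illustrates that wiring, and the structure name and the other fields mentioned are assumptions rather than the actual ZFS-on-Linux definition:

/* Hedged sketch: exposing zpl_readpage() via address_space_operations. */
const struct address_space_operations zpl_address_space_operations = {
	.readpage	= zpl_readpage,
	/* .writepage, .writepages, .direct_IO, ... would also be set here */
};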
Example #14
static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	unsigned char *pg_buf;
	int ret;

	BUG_ON(!PageLocked(pg));

	pg_buf = kmap(pg);
	/* FIXME: Can kmap fail? */

	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);

	if (ret) {
		ClearPageUptodate(pg);
		SetPageError(pg);
	} else {
		SetPageUptodate(pg);
		ClearPageError(pg);
	}

	flush_dcache_page(pg);
	kunmap(pg);

	return ret;
}
Example #15
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;
	struct bvec_iter_all iter_all;

	if (ext4_bio_encrypted(bio)) {
		if (bio->bi_status) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_enqueue_decrypt_bio(bio->bi_private, bio);
			return;
		}
	}
	bio_for_each_segment_all(bv, bio, i, iter_all) {
		struct page *page = bv->bv_page;

		if (!bio->bi_status) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}
Example #16
void nilfs_clear_dirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			ClearPageUptodate(page);
			ClearPageMappedToDisk(page);
			bh = head = page_buffers(page);
			do {
				lock_buffer(bh);
				clear_buffer_dirty(bh);
				clear_buffer_nilfs_volatile(bh);
				clear_buffer_uptodate(bh);
				clear_buffer_mapped(bh);
				unlock_buffer(bh);
				bh = bh->b_this_page;
			} while (bh != head);

			__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
Example #17
static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	unsigned char *pg_buf;
	int ret;

	jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
		  __func__, inode->i_ino, pg->index << PAGE_SHIFT);

	BUG_ON(!PageLocked(pg));

	pg_buf = kmap(pg);
	/* FIXME: Can kmap fail? */

	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
				     PAGE_SIZE);

	if (ret) {
		ClearPageUptodate(pg);
		SetPageError(pg);
	} else {
		SetPageUptodate(pg);
		ClearPageError(pg);
	}

	flush_dcache_page(pg);
	kunmap(pg);

	jffs2_dbg(2, "readpage finished\n");
	return ret;
}
Example #18
int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	unsigned char *pg_buf;
	int ret;

	D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT));

	if (!PageLocked(pg))
                PAGE_BUG(pg);

	pg_buf = kmap(pg);
	/* FIXME: Can kmap fail? */

	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);

	if (ret) {
		ClearPageUptodate(pg);
		SetPageError(pg);
	} else {
		SetPageUptodate(pg);
		ClearPageError(pg);
	}

	flush_dcache_page(pg);
	kunmap(pg);

	D2(printk(KERN_DEBUG "readpage finished\n"));
	return 0;
}
Example #19
//int jffs2_commit_write (struct file *filp, struct page *pg, unsigned start, unsigned end)
int jffs2_commit_write (struct inode *d_inode, struct page *pg, unsigned start, unsigned end)
{
	/* Actually commit the write from the page cache page we're looking at.
	 * For now, we write the full page out each time. It sucks, but it's simple
	 */
	struct inode *inode = d_inode;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	int ret = 0;
	uint32_t writtenlen = 0;

	D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d\n",
		  inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end));


	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		D1(printk(KERN_DEBUG "jffs2_commit_write(): Allocation of raw inode failed\n"));
		return -ENOMEM;
	}

	/* Set the fields that the generic jffs2_write_inode_range() code can't find */
	ri->ino = cpu_to_je32(inode->i_ino);
	ri->mode = cpu_to_jemode(inode->i_mode);
	ri->uid = cpu_to_je16(inode->i_uid);
	ri->gid = cpu_to_je16(inode->i_gid);
	ri->isize = cpu_to_je32((uint32_t)inode->i_size);
	ri->atime = ri->ctime = ri->mtime = cpu_to_je32(cyg_timestamp());

	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + start,
				      (pg->index << PAGE_CACHE_SHIFT) + start,
				      end - start, &writtenlen);

	if (ret) {
		/* There was an error writing. */
		SetPageError(pg);
	}

	if (writtenlen) {
		if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) {
			inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen;
			inode->i_ctime = inode->i_mtime = je32_to_cpu(ri->ctime);
		}
	}

	jffs2_free_raw_inode(ri);

	if (start+writtenlen < end) {
		/* generic_file_write has written more to the page cache than we've
		   actually written to the medium. Mark the page !Uptodate so that 
		   it gets reread */
		D1(printk(KERN_DEBUG "jffs2_commit_write(): Not all bytes written. Marking page !uptodate\n"));
		SetPageError(pg);
		ClearPageUptodate(pg);
	}

	D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d\n",writtenlen?writtenlen:ret));
	return writtenlen?writtenlen:ret;
}
Example #20
/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too.  So we re-check the dirtiness inside
 * ->tree_lock.  That provides exclusion against the __set_page_dirty
 * functions.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page)) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}

	BUG_ON(PagePrivate(page));
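	/*
	 * Two references are expected here: the page cache's own and the one
	 * held by our caller; any extra reference means somebody else is
	 * using the page and it cannot be safely invalidated.
	 */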
	if (page_count(page) != 2) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
}
Example #21
/**
 * ecryptfs_readpage
 * @file: An eCryptfs file
 * @page: Page from eCryptfs inode mapping into which to stick the read data
 *
 * Read in a page, decrypting if necessary.
 *
 * Returns zero on success; non-zero on error.
 */
static int ecryptfs_readpage(struct file *file, struct page *page)
{
	struct ecryptfs_crypt_stat *crypt_stat =
		&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
	int rc = 0;

	/* printk("ecryptfs: read page %lu\n", (unsigned long)(page->index)); */
	/* dump_stack(); */

	if (!crypt_stat
	    || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)
	    || (crypt_stat->flags & ECRYPTFS_NEW_FILE)) {
		ecryptfs_printk(KERN_DEBUG,
				"Passing through unencrypted page\n");
		rc = ecryptfs_read_lower_page_segment(page, page->index, 0,
						      PAGE_CACHE_SIZE,
						      page->mapping->host);
	} else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) {
		if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
			rc = ecryptfs_copy_up_encrypted_with_header(page,
								    crypt_stat);
			if (rc) {
				printk(KERN_ERR "%s: Error attempting to copy "
				       "the encrypted content from the lower "
				       "file whilst inserting the metadata "
				       "from the xattr into the header; rc = "
				       "[%d]\n", __func__, rc);
				goto out;
			}

		} else {
			rc = ecryptfs_read_lower_page_segment(
				page, page->index, 0, PAGE_CACHE_SIZE,
				page->mapping->host);
			if (rc) {
				printk(KERN_ERR "Error reading page; rc = "
				       "[%d]\n", rc);
				goto out;
			}
		}
	} else {
		rc = ecryptfs_decrypt_page(page);

		if (rc) {
			ecryptfs_printk(KERN_ERR, "Error decrypting page; "
					"rc = [%d]\n", rc);
			goto out;
		}
	}
out:
	if (rc)
		ClearPageUptodate(page);
	else
		SetPageUptodate(page);
	ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16lx]\n",
			page->index);
	unlock_page(page);
	return rc;
}
Example #22
/**
 * ecryptfs_writepage
 * @page: Page that is locked before this call is made
 *
 * Returns zero on success; non-zero otherwise
 *
 * This is where we encrypt the data and pass the encrypted data to
 * the lower filesystem.  In OpenPGP-compatible mode, we operate on
 * entire underlying packets.
 */
static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;
#ifdef FEATURE_SDCARD_ENCRYPTION
	struct inode *ecryptfs_inode;
	struct ecryptfs_crypt_stat *crypt_stat =
		&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
	ecryptfs_inode = page->mapping->host;
#endif

#ifdef FEATURE_SDCARD_ENCRYPTION
	if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
		ecryptfs_printk(KERN_DEBUG,
				"Passing through unencrypted page\n");
		rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page,
						       0, PAGE_CACHE_SIZE);
		if (rc) {
			ClearPageUptodate(page);
			goto out;
		}
		SetPageUptodate(page);
	} else {
		rc = ecryptfs_encrypt_page(page);
		if (rc) {
			ecryptfs_printk(KERN_WARNING, "Error encrypting "
					"page (upper index [0x%.16lx])\n",
					page->index);
			ClearPageUptodate(page);
			goto out;
		}
		SetPageUptodate(page);
	}
#else
	rc = ecryptfs_encrypt_page(page);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting "
				"page (upper index [0x%.16lx])\n", page->index);
		ClearPageUptodate(page);
		goto out;
	}
	SetPageUptodate(page);
#endif
out:
	unlock_page(page);
	return rc;
}
Example #23
void end_swap_bio_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
		goto out;
	}

	SetPageUptodate(page);

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (likely(PageSwapCache(page))) {
		/*
		 * The swap subsystem performs lazy swap slot freeing,
		 * expecting that the page will be swapped out again.
		 * So we can avoid an unnecessary write if the page
		 * isn't redirtied.
		 * This is good for real swap storage because we can
		 * reduce unnecessary I/O and enhance wear-leveling
		 * if an SSD is used as the swap device.
		 * But if in-memory swap device (eg zram) is used,
		 * this causes a duplicated copy between uncompressed
		 * data in VM-owned memory and compressed data in
		 * zram-owned memory.  So let's free zram-owned memory
		 * and make the VM-owned decompressed page *dirty*,
		 * so the page should be swapped out somewhere again if
		 * we again wish to reclaim it.
		 */
		struct gendisk *disk = bio->bi_bdev->bd_disk;
		if (disk->fops->swap_slot_free_notify) {
			swp_entry_t entry;
			unsigned long offset;

			entry.val = page_private(page);
			offset = swp_offset(entry);

			SetPageDirty(page);
			disk->fops->swap_slot_free_notify(bio->bi_bdev,
							  offset);
		}
	}

out:
	unlock_page(page);
	bio_put(bio);
}
Example #24
static int cramfs_readpage(struct file *file, struct page * page)
{
	struct inode *inode = page->mapping->host;
	u32 maxblock;
	int bytes_filled;
	void *pgdata;

	maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	bytes_filled = 0;
	pgdata = kmap(page);

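	/*
	 * cramfs keeps one 32-bit pointer per block holding the offset of the
	 * end of that block's compressed data, so block i spans from pointer
	 * i-1 (or the end of the pointer table for block 0) up to pointer i.
	 */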
	if (page->index < maxblock) {
		struct super_block *sb = inode->i_sb;
		u32 blkptr_offset = OFFSET(inode) + page->index*4;
		u32 start_offset, compr_len;

		start_offset = OFFSET(inode) + maxblock*4;
		mutex_lock(&read_mutex);
		if (page->index)
			start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4,
				4);
		compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) -
			start_offset);
		mutex_unlock(&read_mutex);

		if (compr_len == 0)
			; /* hole */
		else if (unlikely(compr_len > (PAGE_CACHE_SIZE << 1))) {
			pr_err("cramfs: bad compressed blocksize %u\n",
				compr_len);
			goto err;
		} else {
			mutex_lock(&read_mutex);
			bytes_filled = cramfs_uncompress_block(pgdata,
				 PAGE_CACHE_SIZE,
				 cramfs_read(sb, start_offset, compr_len),
				 compr_len);
			mutex_unlock(&read_mutex);
			if (unlikely(bytes_filled < 0))
				goto err;
		}
	}

	memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled);
	flush_dcache_page(page);
	kunmap(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;

err:
	kunmap(page);
	ClearPageUptodate(page);
	SetPageError(page);
	unlock_page(page);
	return 0;
}
Example #25
int ngffs_sysfile_do_readpage_nolock(struct inode *inode, struct page *pg)
{
	struct ngffs_info *ngsb=NGFFS_INFO(inode->i_sb);
	int i;
	int rv=0;
	__u32 offset;

	i=inode->i_ino-3;
	PK_DBG("sysfile found at %i\n",i);

	PK_DBG("sysfile read\n");
	if (!PageLocked(pg)) {
		/* PLEASECHECK Koen: PAGE_BUG has been removed as of 2.6.12 or so,
		 * no idea what it should be. */
		printk("page BUG for page at %p\n", pg);
		BUG();
	}

	if(i>=NGFFS_SYSFILES) {
		PK_WARN("sysfile id out of range!\n");
		goto readpage_fail;
	}

	/* Determine offset */
	offset = ngffs_sysfiles[i].ofs;
	if ( ngsb->mtd->size >= 0x200000 ) /* >= 2MB */
	{
		/* factory data stored in upper half of flash */
		if ( offset < 0x4000 ) /* factory data */
		{
			offset += 0x100000; /* 1MB offset */
		}
	}

	printk("[kwwo] reading abs addr 0x%x\n", offset);
	rv=ngffs_absolute_read(ngsb->mtd,offset,(u_char *)page_address(pg),ngffs_sysfiles[i].length);
	if(rv) goto readpage_fail;

	//  if (!strcmp(ngffs_sysfiles[i].name,"id")) memcpy((u_char *)page_address(pg),"AAAAAAAAAAAA",12);

	SetPageUptodate(pg);
	ClearPageError(pg);
	flush_dcache_page(pg);
	kunmap(pg);
	return 0;

readpage_fail:
	ClearPageUptodate(pg);
	SetPageError(pg);
	kunmap(pg);
	return rv;

}
Example #26
int do_write_data_page(struct f2fs_io_info *fio)
{
    struct page *page = fio->page;
    struct inode *inode = page->mapping->host;
    struct dnode_of_data dn;
    int err = 0;

    set_new_dnode(&dn, inode, NULL, NULL, 0);
    err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
    if (err)
        return err;

    fio->blk_addr = dn.data_blkaddr;

    /* This page is already truncated */
    if (fio->blk_addr == NULL_ADDR) {
        ClearPageUptodate(page);
        goto out_writepage;
    }

    if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
        fio->encrypted_page = f2fs_encrypt(inode, fio->page);
        if (IS_ERR(fio->encrypted_page)) {
            err = PTR_ERR(fio->encrypted_page);
            goto out_writepage;
        }
    }

    set_page_writeback(page);

    /*
     * If current allocation needs SSR,
     * it had better in-place writes for updated data.
     */
    if (unlikely(fio->blk_addr != NEW_ADDR &&
                 !is_cold_data(page) &&
                 need_inplace_update(inode))) {
        rewrite_data_page(fio);
        set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
        trace_f2fs_do_write_data_page(page, IPU);
    } else {
        write_data_page(&dn, fio);
        set_data_blkaddr(&dn);
        f2fs_update_extent_cache(&dn);
        trace_f2fs_do_write_data_page(page, OPU);
        set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
        if (page->index == 0)
            set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
    }
out_writepage:
    f2fs_put_dnode(&dn);
    return err;
}
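For context, a caller is expected to fill in an f2fs_io_info describing the page before invoking do_write_data_page(); the snippet below is a hedged sketch that uses only the fields visible in the example above plus assumed type/rw members, not the complete real structure:

	/* Hypothetical setup; fields beyond .page/.encrypted_page are assumptions. */
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC,
		.page = page,
		.encrypted_page = NULL,
	};
	err = do_write_data_page(&fio);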
Example #27
static int yaffs_readpage_nolock(struct file *f, struct page *pg)
{

	yaffs_Object *obj;
	unsigned char *pg_buf;
	int ret;

	yaffs_Device *dev;

	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage at %08x, size %08x\n",
			   (unsigned)(pg->index << PAGE_CACHE_SHIFT),
			   (unsigned)PAGE_CACHE_SIZE));

	obj = yaffs_DentryToObject(f->f_dentry);

	dev = obj->myDev;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
	BUG_ON(!PageLocked(pg));
#else
	if (!PageLocked(pg))
		PAGE_BUG(pg);
#endif

	pg_buf = kmap(pg);

	yaffs_GrossLock(dev);

	ret =
	    yaffs_ReadDataFromFile(obj, pg_buf, pg->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE);

	yaffs_GrossUnlock(dev);

	if (ret >= 0)
		ret = 0;

	if (ret) {
		ClearPageUptodate(pg);
		SetPageError(pg);
	} else {
		SetPageUptodate(pg);
		ClearPageError(pg);
	}

	flush_dcache_page(pg);
	kunmap(pg);

	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage done\n"));
	return ret;
}
Example #28
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (bio_data_dir(bio) == READ) {
			if (uptodate) {
				int enc_status = tenc_decrypt_page(page);
				if (enc_status == TENC_CAN_UNLOCK) {
					/* Decryption code is not interested. Unlock immediately */
					SetPageUptodate(page);
					unlock_page(page);
				}
				else if (enc_status == TENC_DECR_FAIL) {
					ClearPageUptodate(page);
					SetPageError(page);
					unlock_page(page);
				}
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
				unlock_page(page);
			}
		} else { /* bio_data_dir(bio) == WRITE */
			if (!uptodate) {
				SetPageError(page);
				if (page->mapping)
					set_bit(AS_EIO, &page->mapping->flags);
			}
			end_page_writeback(page);
		}
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
Example #29
/* this is helper for plugin->write_begin() */
int do_prepare_write(struct file *file, struct page *page, unsigned from,
		 unsigned to)
{
	int result;
	file_plugin *fplug;
	struct inode *inode;

	assert("umka-3099", file != NULL);
	assert("umka-3100", page != NULL);
	assert("umka-3095", PageLocked(page));

	if (to - from == PAGE_CACHE_SIZE || PageUptodate(page))
		return 0;

	inode = page->mapping->host;
	fplug = inode_file_plugin(inode);

	if (page->mapping->a_ops->readpage == NULL)
		return RETERR(-EINVAL);

	result = page->mapping->a_ops->readpage(file, page);
	if (result != 0) {
		SetPageError(page);
		ClearPageUptodate(page);
		/* All reiser4 readpage() implementations should return the
		 * page locked in case of error. */
		assert("nikita-3472", PageLocked(page));
	} else {
		/*
		 * ->readpage() either:
		 *
		 *     1. starts IO against @page. @page is locked for IO in
		 *     this case.
		 *
		 *     2. doesn't start IO. @page is unlocked.
		 *
		 * In either case, page should be locked.
		 */
		lock_page(page);
		/*
		 * IO (if any) is completed at this point. Check for IO
		 * errors.
		 */
		if (!PageUptodate(page))
			result = RETERR(-EIO);
	}
	assert("umka-3098", PageLocked(page));
	return result;
}
Example #30
static void orangefs_invalidatepage(struct page *page,
				 unsigned int offset,
				 unsigned int length)
{
	gossip_debug(GOSSIP_INODE_DEBUG,
		     "orangefs_invalidatepage called on page %p "
		     "(offset is %u)\n",
		     page,
		     offset);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	return;

}