static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;
	int done_trans = 0;

	error = gfs2_writepage_common(page, wbc);
	if (error <= 0)
		return error;

	if (PageChecked(page)) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out_ignore;
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (error)
			goto out_ignore;
		done_trans = 1;
	}
	error = __gfs2_jdata_writepage(page, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	return error;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
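For orientation: callbacks such as gfs2_jdata_writepage and gfs2_writepage_common above are not called directly; the VM reaches them through the inode's address_space_operations table. A minimal registration sketch, assuming 2.6-era field names (the set_page_dirty choice is a placeholder, and the real gfs2 tables carry many more methods):

#include <linux/fs.h>
#include <linux/buffer_head.h>

static const struct address_space_operations gfs2_jdata_aops_sketch = {
	.writepage	= gfs2_jdata_writepage,		/* entry point shown above */
	.set_page_dirty	= __set_page_dirty_buffers,	/* placeholder choice */
};

/* Typically wired up when the inode is initialized:
 * inode->i_mapping->a_ops = &gfs2_jdata_aops_sketch; */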
Example #3
static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ?
			WRITE_SYNC_PLUG : WRITE));

	BUG_ON(!PageLocked(page));
	BUG_ON(!page_has_buffers(page));

	head = page_buffers(page);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from pdflush and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(write_op, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	err = 0;
	if (nr_underway == 0)
		end_page_writeback(page);

	return err;
}
Example #4
/**
 * ecryptfs_writepage
 * @page: Page that is locked before this call is made
 *
 * Returns zero on success; non-zero otherwise
 *
 * This is where we encrypt the data and pass the encrypted data to
 * the lower filesystem.  In OpenPGP-compatible mode, we operate on
 * entire underlying packets.
 */
static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	/*
	 * Refuse to write the page out if we are called from reclaim context
	 * since our writepage() path may potentially allocate memory when
	 * calling into the lower fs vfs_write() which may in turn invoke
	 * us again.
	 */
	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		rc = 0;
		goto out;
	}

	rc = ecryptfs_encrypt_page(page);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting "
				"page (upper index [0x%.16lx])\n", page->index);
		ClearPageUptodate(page);
		goto out;
	}
	SetPageUptodate(page);
out:
	unlock_page(page);
	return rc;
}
Example #5
File: inode.c Project: battahma/cs444
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		/*
		 * It means that filesystem was remounted in read-only
		 * mode because of error or metadata corruption. But we
		 * have dirty pages that try to be flushed in background.
		 * So, here we simply discard this dirty page.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
Example #6
/**
 * ecryptfs_writepage
 * @page: Page that is locked before this call is made
 *
 * Returns zero on success; non-zero otherwise
 *
 * This is where we encrypt the data and pass the encrypted data to
 * the lower filesystem.  In OpenPGP-compatible mode, we operate on
 * entire underlying packets.
 */
static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
    int rc;
#if 1 // FEATURE_SDCARD_ENCRYPTION
    struct inode *ecryptfs_inode;
    struct ecryptfs_crypt_stat *crypt_stat =
        &ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
    ecryptfs_inode = page->mapping->host;
#endif

    /*
     * Refuse to write the page out if we are called from reclaim context
     * since our writepage() path may potentially allocate memory when
     * calling into the lower fs vfs_write() which may in turn invoke
     * us again.
     */
    if (current->flags & PF_MEMALLOC) {
        redirty_page_for_writepage(wbc, page);
        rc = 0;
        goto out;
    }

#if 1 // FEATURE_SDCARD_ENCRYPTION
    if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
        ecryptfs_printk(KERN_DEBUG,
                        "Passing through unencrypted page\n");
        rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page,
                                               0, PAGE_CACHE_SIZE);
        if (rc) {
            ClearPageUptodate(page);
            goto out;
        }
        SetPageUptodate(page);
    } else {
        rc = ecryptfs_encrypt_page(page);
        if (rc) {
            ecryptfs_printk(KERN_WARNING, "Error encrypting "
                            "page (upper index [0x%.16lx])\n", page->index);
            ClearPageUptodate(page);
            goto out;
        }
        SetPageUptodate(page);
    }
#else
    rc = ecryptfs_encrypt_page(page);
    if (rc) {
        ecryptfs_printk(KERN_WARNING, "Error encrypting "
                        "page (upper index [0x%.16lx])\n", page->index);
        ClearPageUptodate(page);
        goto out;
    }
    SetPageUptodate(page);
#endif
out:
    unlock_page(page);
    return rc;
}
Example #7
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int error;
	int done_trans = 0;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
		unlock_page(page);
		return -EIO;
	}
	if (current->journal_info)
		goto out_ignore;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) {
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (error)
			goto out_ignore;
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
		done_trans = 1;
	}
	error = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	gfs2_meta_cache_flush(ip);
	return error;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
Example #8
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
Example #9
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	page_debug(3, "called (page=%p, index=%lu, wbc nonblocking %d, "
		   "wbc for_reclaim %d)\n",
		    page, page->index, wbc->nonblocking, wbc->for_reclaim);
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
Example #10
File: aops.c Project: AK101111/linux
static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
Example #11
static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		rc = 0;
		goto out;
	}

	rc = ecryptfs_encrypt_page(page);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting "
				"page (upper index [0x%.16lx])\n", page->index);
		ClearPageUptodate(page);
		goto out;
	}
	SetPageUptodate(page);
out:
	unlock_page(page);
	return rc;
}
Example #12
File: write.c Project: krzk/linux
/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}
Example #13
File: data.c Project: aejsmith/linux
static int f2fs_write_data_page(struct page *page,
                                struct writeback_control *wbc)
{
    struct inode *inode = page->mapping->host;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    loff_t i_size = i_size_read(inode);
    const pgoff_t end_index = ((unsigned long long) i_size)
                              >> PAGE_CACHE_SHIFT;
    unsigned offset = 0;
    bool need_balance_fs = false;
    int err = 0;
    struct f2fs_io_info fio = {
        .sbi = sbi,
        .type = DATA,
        .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
        .page = page,
        .encrypted_page = NULL,
    };

    trace_f2fs_writepage(page, DATA);

    if (page->index < end_index)
        goto write;

    /*
     * If the offset is out-of-range of file size,
     * this page does not have to be written to disk.
     */
    offset = i_size & (PAGE_CACHE_SIZE - 1);
    if ((page->index >= end_index + 1) || !offset)
        goto out;

    zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
    if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
        goto redirty_out;
    if (f2fs_is_drop_cache(inode))
        goto out;
    if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
            available_free_memory(sbi, BASE_CHECK))
        goto redirty_out;

    /* Dentry blocks are controlled by checkpoint */
    if (S_ISDIR(inode->i_mode)) {
        if (unlikely(f2fs_cp_error(sbi)))
            goto redirty_out;
        err = do_write_data_page(&fio);
        goto done;
    }

    /* we should bypass data pages to let the kworker jobs proceed */
    if (unlikely(f2fs_cp_error(sbi))) {
        SetPageError(page);
        goto out;
    }

    if (!wbc->for_reclaim)
        need_balance_fs = true;
    else if (has_not_enough_free_secs(sbi, 0))
        goto redirty_out;

    err = -EAGAIN;
    f2fs_lock_op(sbi);
    if (f2fs_has_inline_data(inode))
        err = f2fs_write_inline_data(inode, page);
    if (err == -EAGAIN)
        err = do_write_data_page(&fio);
    f2fs_unlock_op(sbi);
done:
    if (err && err != -ENOENT)
        goto redirty_out;

    clear_cold_data(page);
out:
    inode_dec_dirty_pages(inode);
    if (err)
        ClearPageUptodate(page);
    unlock_page(page);
    if (need_balance_fs)
        f2fs_balance_fs(sbi);
    if (wbc->for_reclaim)
        f2fs_submit_merged_bio(sbi, DATA, WRITE);
    return 0;

redirty_out:
    redirty_page_for_writepage(wbc, page);
    return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
                            void *data)
{
    struct address_space *mapping = data;
    int ret = mapping->a_ops->writepage(page, wbc);
    mapping_set_error(mapping, ret);
    return ret;
}

/*
 * This function was copied from write_cache_pages in mm/page-writeback.c.
 * The major change is that cold data pages are written in a separate pass
 * from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
                                  struct writeback_control *wbc, writepage_t writepage,
                                  void *data)
{
    int ret = 0;
    int done = 0;
    struct pagevec pvec;
    int nr_pages;
    pgoff_t uninitialized_var(writeback_index);
    pgoff_t index;
    pgoff_t end;		/* Inclusive */
    pgoff_t done_index;
    int cycled;
    int range_whole = 0;
    int tag;
    int step = 0;

    pagevec_init(&pvec, 0);
next:
    if (wbc->range_cyclic) {
        writeback_index = mapping->writeback_index; /* prev offset */
        index = writeback_index;
        if (index == 0)
            cycled = 1;
        else
            cycled = 0;
        end = -1;
    } else {
        index = wbc->range_start >> PAGE_CACHE_SHIFT;
        end = wbc->range_end >> PAGE_CACHE_SHIFT;
        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
            range_whole = 1;
        cycled = 1; /* ignore range_cyclic tests */
    }
    if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
        tag = PAGECACHE_TAG_TOWRITE;
    else
        tag = PAGECACHE_TAG_DIRTY;
retry:
    if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
        tag_pages_for_writeback(mapping, index, end);
    done_index = index;
    while (!done && (index <= end)) {
        int i;

        nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
                                      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
        if (nr_pages == 0)
            break;

        for (i = 0; i < nr_pages; i++) {
            struct page *page = pvec.pages[i];

            if (page->index > end) {
                done = 1;
                break;
            }

            done_index = page->index;

            lock_page(page);

            if (unlikely(page->mapping != mapping)) {
continue_unlock:
                unlock_page(page);
                continue;
            }

            if (!PageDirty(page)) {
                /* someone wrote it for us */
                goto continue_unlock;
            }

            if (step == is_cold_data(page))
                goto continue_unlock;

            if (PageWriteback(page)) {
                if (wbc->sync_mode != WB_SYNC_NONE)
                    f2fs_wait_on_page_writeback(page, DATA);
                else
                    goto continue_unlock;
            }

            BUG_ON(PageWriteback(page));
            if (!clear_page_dirty_for_io(page))
                goto continue_unlock;

            ret = (*writepage)(page, wbc, data);
            if (unlikely(ret)) {
                if (ret == AOP_WRITEPAGE_ACTIVATE) {
                    unlock_page(page);
                    ret = 0;
                } else {
                    done_index = page->index + 1;
                    done = 1;
                    break;
                }
            }

            if (--wbc->nr_to_write <= 0 &&
                    wbc->sync_mode == WB_SYNC_NONE) {
                done = 1;
                break;
            }
        }
        pagevec_release(&pvec);
        cond_resched();
    }

    if (step < 1) {
        step++;
        goto next;
    }

    if (!cycled && !done) {
        cycled = 1;
        index = 0;
        end = writeback_index - 1;
        goto retry;
    }
    if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
        mapping->writeback_index = done_index;

    return ret;
}
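A usage note for the pair above: __f2fs_writepage takes the address_space as its private data, so the ->writepages path presumably glues the two together roughly as follows (a sketch; the real f2fs_write_data_pages wraps this call in skip heuristics and merged-bio submission):

static int f2fs_write_data_pages_sketch(struct address_space *mapping,
					struct writeback_control *wbc)
{
	/* the mapping doubles as the callback's private data */
	return f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
}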
Example #14
/**
 * ecryptfs_writepage
 * @page: Page that is locked before this call is made
 *
 * Returns zero on success; non-zero otherwise
 *
 * This is where we encrypt the data and pass the encrypted data to
 * the lower filesystem.  In OpenPGP-compatible mode, we operate on
 * entire underlying packets.
 */
static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
#ifndef CONFIG_CRYPTO_DEV_KFIPS
	int rc;
#else
	struct ecryptfs_page_crypt_req *page_crypt_req;
	int rc = 0;
#endif
#if 1 // FEATURE_SDCARD_ENCRYPTION
	struct inode *ecryptfs_inode;
	struct ecryptfs_crypt_stat *crypt_stat =
		&ecryptfs_inode_to_private(page->mapping->host)->crypt_stat;
	ecryptfs_inode = page->mapping->host;
#endif

	/*
	 * Refuse to write the page out if we are called from reclaim context
	 * since our writepage() path may potentially allocate memory when
	 * calling into the lower fs vfs_write() which may in turn invoke
	 * us again.
	 */
	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
#ifndef CONFIG_CRYPTO_DEV_KFIPS
		rc = 0;
#endif
		goto out;
	}

#if 1 // FEATURE_SDCARD_ENCRYPTION
	if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
		ecryptfs_printk(KERN_DEBUG,
				"Passing through unencrypted page\n");
		rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page,
			0, PAGE_CACHE_SIZE);
		if (rc) {
			ClearPageUptodate(page);
			goto out;
		}
		SetPageUptodate(page);
	} else {
#ifndef CONFIG_CRYPTO_DEV_KFIPS
	rc = ecryptfs_encrypt_page(page);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting "
				"page (upper index [0x%.16lx])\n", page->index);
		ClearPageUptodate(page);
#else
//	rc = ecryptfs_encrypt_page(page);
//	if (rc) {
//		ecryptfs_printk(KERN_WARNING, "Error encrypting "
//				"page (upper index [0x%.16lx])\n", page->index);
//		ClearPageUptodate(page);
	page_crypt_req = ecryptfs_alloc_page_crypt_req(
				page, ecryptfs_writepage_complete);
	if (unlikely(!page_crypt_req)) {
		rc = -ENOMEM;
		ecryptfs_printk(KERN_ERR,
				"Failed to allocate page crypt request "
				"for encryption\n");
#endif
		goto out;
	}
#ifndef CONFIG_CRYPTO_DEV_KFIPS
	SetPageUptodate(page);
#else
//	SetPageUptodate(page);
	set_page_writeback(page);
	ecryptfs_encrypt_page_async(page_crypt_req);
#endif
	}
#else
	rc = ecryptfs_encrypt_page(page);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting "
				"page (upper index [0x%.16lx])\n", page->index);
		ClearPageUptodate(page);
		goto out;
	}
	SetPageUptodate(page);
#endif
out:
	unlock_page(page);
	return rc;
}

static void strip_xattr_flag(char *page_virt,
			     struct ecryptfs_crypt_stat *crypt_stat)
{
	if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) {
		size_t written;

		crypt_stat->flags &= ~ECRYPTFS_METADATA_IN_XATTR;
		ecryptfs_write_crypt_stat_flags(page_virt, crypt_stat,
						&written);
		crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
	}
}

/**
 *   Header Extent:
 *     Octets 0-7:        Unencrypted file size (big-endian)
 *     Octets 8-15:       eCryptfs special marker
 *     Octets 16-19:      Flags
 *      Octet 16:         File format version number (between 0 and 255)
 *      Octets 17-18:     Reserved
 *      Octet 19:         Bit 1 (lsb): Reserved
 *                        Bit 2: Encrypted?
 *                        Bits 3-8: Reserved
 *     Octets 20-23:      Header extent size (big-endian)
 *     Octets 24-25:      Number of header extents at front of file
 *                        (big-endian)
 *     Octet  26:         Begin RFC 2440 authentication token packet set
 */
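
/*
 * A hedged sketch of pulling the fields laid out above out of a mapped
 * header extent. The struct and parser here are illustrative only;
 * eCryptfs has its own helpers (e.g. ecryptfs_write_header_metadata,
 * used below). Offsets and big-endian byte order follow the comment
 * above; assumes <linux/types.h> and <asm/byteorder.h>.
 */
struct ecryptfs_header_sketch {
	u64 file_size;		/* octets 0-7, big-endian */
	u8  version;		/* octet 16 */
	bool encrypted;		/* octet 19, bit 2 */
	u32 extent_size;	/* octets 20-23, big-endian */
	u16 num_header_extents;	/* octets 24-25, big-endian */
};

static void ecryptfs_parse_header_sketch(const u8 *virt,
					 struct ecryptfs_header_sketch *hdr)
{
	hdr->file_size = be64_to_cpup((const __be64 *)virt);
	hdr->version = virt[16];
	hdr->encrypted = virt[19] & 0x02;
	hdr->extent_size = be32_to_cpup((const __be32 *)(virt + 20));
	hdr->num_header_extents = be16_to_cpup((const __be16 *)(virt + 24));
}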

/**
 * ecryptfs_copy_up_encrypted_with_header
 * @page: Sort of a ``virtual'' representation of the encrypted lower
 *        file. The actual lower file does not have the metadata in
 *        the header. This is locked.
 * @crypt_stat: The eCryptfs inode's cryptographic context
 *
 * The ``view'' is the version of the file that userspace winds up
 * seeing, with the header information inserted.
 */
static int
ecryptfs_copy_up_encrypted_with_header(struct page *page,
				       struct ecryptfs_crypt_stat *crypt_stat)
{
	loff_t extent_num_in_page = 0;
	loff_t num_extents_per_page = (PAGE_CACHE_SIZE
				       / crypt_stat->extent_size);
	int rc = 0;

	while (extent_num_in_page < num_extents_per_page) {
		loff_t view_extent_num = ((((loff_t)page->index)
					   * num_extents_per_page)
					  + extent_num_in_page);
		size_t num_header_extents_at_front =
			(crypt_stat->metadata_size / crypt_stat->extent_size);

		if (view_extent_num < num_header_extents_at_front) {
			/* This is a header extent */
			char *page_virt;

			page_virt = kmap_atomic(page);
			memset(page_virt, 0, PAGE_CACHE_SIZE);
			/* TODO: Support more than one header extent */
			if (view_extent_num == 0) {
				size_t written;

				rc = ecryptfs_read_xattr_region(
					page_virt, page->mapping->host);
				strip_xattr_flag(page_virt + 16, crypt_stat);
				ecryptfs_write_header_metadata(page_virt + 20,
							       crypt_stat,
							       &written);
			}
			kunmap_atomic(page_virt);
			flush_dcache_page(page);
			if (rc) {
				printk(KERN_ERR "%s: Error reading xattr "
				       "region; rc = [%d]\n", __func__, rc);
				goto out;
			}
		} else {
			/* This is an encrypted data extent */
			loff_t lower_offset =
				((view_extent_num * crypt_stat->extent_size)
				 - crypt_stat->metadata_size);

			rc = ecryptfs_read_lower_page_segment(
				page, (lower_offset >> PAGE_CACHE_SHIFT),
				(lower_offset & ~PAGE_CACHE_MASK),
				crypt_stat->extent_size, page->mapping->host);
			if (rc) {
				printk(KERN_ERR "%s: Error attempting to read "
				       "extent at offset [%lld] in the lower "
				       "file; rc = [%d]\n", __func__,
				       lower_offset, rc);
				goto out;
			}
		}
		extent_num_in_page++;
	}
out:
	return rc;
}
Example #15
/**
 * ntfs_mft_writepage - check if a metadata page contains dirty mft records
 * @page:	metadata page possibly containing dirty mft records
 * @wbc:	writeback control structure
 *
 * This is called from the VM when it wants to have a dirty $MFT/$DATA metadata
 * page cache page cleaned.  The VM has already locked the page and marked it
 * clean.  Instead of writing the page as a conventional ->writepage function
 * would do, we check if the page still contains any dirty mft records (it must
 * have done at some point in the past since the page was marked dirty) and if
 * none are found, i.e. all mft records are clean, we unlock the page and
 * return.  The VM is then free to do with the page as it pleases.  If on the
 * other hand we do find any dirty mft records in the page, we redirty the page
 * before unlocking it and returning so the VM knows that the page is still
 * busy and cannot be thrown out.
 *
 * Note, we do not actually write any dirty mft records here because they are
 * dirty inodes and hence will be written by the VFS inode dirty code paths.
 * There is no need to write them from the VM page dirty code paths too, and in
 * fact once we implement journalling it would be a complete nightmare having
 * two code paths leading to mft record writeout.
 */
static int ntfs_mft_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *mft_vi = page->mapping->host;
	struct super_block *sb = mft_vi->i_sb;
	ntfs_volume *vol = NTFS_SB(sb);
	u8 *maddr;
	MFT_RECORD *m;
	ntfs_inode **extent_nis;
	unsigned long mft_no;
	int nr, i, j;
	BOOL is_dirty = FALSE;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(mft_vi != vol->mft_ino);
	/* The first mft record number in the page. */
	mft_no = page->index << (PAGE_CACHE_SHIFT - vol->mft_record_size_bits);
	/* Number of mft records in the page. */
	nr = PAGE_CACHE_SIZE >> vol->mft_record_size_bits;
	BUG_ON(!nr);
	ntfs_debug("Entering for %i inodes starting at 0x%lx.", nr, mft_no);
	/* Iterate over the mft records in the page looking for a dirty one. */
	maddr = (u8*)kmap(page);
	for (i = 0; i < nr; ++i, ++mft_no, maddr += vol->mft_record_size) {
		struct inode *vi;
		ntfs_inode *ni, *eni;
		ntfs_attr na;

		na.mft_no = mft_no;
		na.name = NULL;
		na.name_len = 0;
		na.type = AT_UNUSED;
		/*
		 * Check if the inode corresponding to this mft record is in
		 * the VFS inode cache and obtain a reference to it if it is.
		 */
		ntfs_debug("Looking for inode 0x%lx in icache.", mft_no);
		/*
		 * For inode 0, i.e. $MFT itself, we cannot use ilookup5() from
		 * here or we deadlock because the inode is already locked by
		 * the kernel (fs/fs-writeback.c::__sync_single_inode()) and
		 * ilookup5() waits until the inode is unlocked before
		 * returning it and it never gets unlocked because
		 * ntfs_mft_writepage() never returns.  )-:  Fortunately, we
		 * have inode 0 pinned in icache for the duration of the mount
		 * so we can access it directly.
		 */
		if (!mft_no) {
			/* Balance the below iput(). */
			vi = igrab(mft_vi);
			BUG_ON(vi != mft_vi);
		} else
			vi = ilookup5(sb, mft_no, (test_t)ntfs_test_inode, &na);
		if (vi) {
			ntfs_debug("Inode 0x%lx is in icache.", mft_no);
			/* The inode is in icache.  Check if it is dirty. */
			ni = NTFS_I(vi);
			if (!NInoDirty(ni)) {
				/* The inode is not dirty, skip this record. */
				ntfs_debug("Inode 0x%lx is not dirty, "
						"continuing search.", mft_no);
				iput(vi);
				continue;
			}
			ntfs_debug("Inode 0x%lx is dirty, aborting search.",
					mft_no);
			/* The inode is dirty, no need to search further. */
			iput(vi);
			is_dirty = TRUE;
			break;
		}
		ntfs_debug("Inode 0x%lx is not in icache.", mft_no);
		/* The inode is not in icache. */
		/* Skip the record if it is not a mft record (type "FILE"). */
		if (!ntfs_is_mft_recordp(maddr)) {
			ntfs_debug("Mft record 0x%lx is not a FILE record, "
					"continuing search.", mft_no);
			continue;
		}
		m = (MFT_RECORD*)maddr;
		/*
		 * Skip the mft record if it is not in use.  FIXME:  What about
		 * deleted/deallocated (extent) inodes?  (AIA)
		 */
		if (!(m->flags & MFT_RECORD_IN_USE)) {
			ntfs_debug("Mft record 0x%lx is not in use, "
					"continuing search.", mft_no);
			continue;
		}
		/* Skip the mft record if it is a base inode. */
		if (!m->base_mft_record) {
			ntfs_debug("Mft record 0x%lx is a base record, "
					"continuing search.", mft_no);
			continue;
		}
		/*
		 * This is an extent mft record.  Check if the inode
		 * corresponding to its base mft record is in icache.
		 */
		na.mft_no = MREF_LE(m->base_mft_record);
		ntfs_debug("Mft record 0x%lx is an extent record.  Looking "
				"for base inode 0x%lx in icache.", mft_no,
				na.mft_no);
		vi = ilookup5(sb, na.mft_no, (test_t)ntfs_test_inode,
				&na);
		if (!vi) {
			/*
			 * The base inode is not in icache.  Skip this extent
			 * mft record.
			 */
			ntfs_debug("Base inode 0x%lx is not in icache, "
					"continuing search.", na.mft_no);
			continue;
		}
		ntfs_debug("Base inode 0x%lx is in icache.", na.mft_no);
		/*
		 * The base inode is in icache.  Check if it has the extent
		 * inode corresponding to this extent mft record attached.
		 */
		ni = NTFS_I(vi);
		down(&ni->extent_lock);
		if (ni->nr_extents <= 0) {
			/*
			 * The base inode has no attached extent inodes.  Skip
			 * this extent mft record.
			 */
			up(&ni->extent_lock);
			iput(vi);
			continue;
		}
		/* Iterate over the attached extent inodes. */
		extent_nis = ni->ext.extent_ntfs_inos;
		for (eni = NULL, j = 0; j < ni->nr_extents; ++j) {
			if (mft_no == extent_nis[j]->mft_no) {
				/*
				 * Found the extent inode corresponding to this
				 * extent mft record.
				 */
				eni = extent_nis[j];
				break;
			}
		}
		/*
		 * If the extent inode was not attached to the base inode, skip
		 * this extent mft record.
		 */
		if (!eni) {
			up(&ni->extent_lock);
			iput(vi);
			continue;
		}
		/*
		 * Found the extent inode corresponding to this extent mft
		 * record.  If it is dirty, no need to search further.
		 */
		if (NInoDirty(eni)) {
			up(&ni->extent_lock);
			iput(vi);
			is_dirty = TRUE;
			break;
		}
		/* The extent inode is not dirty, so do the next record. */
		up(&ni->extent_lock);
		iput(vi);
	}
	kunmap(page);
	/* If a dirty mft record was found, redirty the page. */
	if (is_dirty) {
		ntfs_debug("Inode 0x%lx is dirty.  Redirtying the page "
				"starting at inode 0x%lx.", mft_no,
				page->index << (PAGE_CACHE_SHIFT -
				vol->mft_record_size_bits));
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
	} else {
		/*
		 * Keep the VM happy.  This must be done otherwise the
		 * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
		 * the page is clean.
		 */
		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
	}
	ntfs_debug("Done.");
	return 0;
}