Example #1
0
/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too.  So we re-check the dirtiness inside
 * ->tree_lock.  That provides exclusion against the __set_page_dirty
 * functions.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page)) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}

	BUG_ON(PagePrivate(page));
	if (page_count(page) != 2) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
}
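
The function above expects its caller to already hold the page lock and to have skipped pages that are obviously dirty or under writeback. A minimal, hypothetical wrapper illustrating that calling convention (the helper name is invented and is not part of the kernel source shown above):

/* Hypothetical illustration: take the page lock, filter out pages that
 * cannot be invalidated, then let invalidate_complete_page() perform the
 * re-checks under ->tree_lock. */
static int try_invalidate_one(struct address_space *mapping,
			      struct page *page)
{
	int ret = 0;

	if (!trylock_page(page))
		return 0;
	if (!PageDirty(page) && !PageWriteback(page))
		ret = invalidate_complete_page(mapping, page);
	unlock_page(page);
	return ret;
}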
Example #2
0
/*
 * Attempt to release the private state associated with a page
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 * - Return true (may release page) or false (may not)
 */
static int nfs_release_page(struct page *page, gfp_t gfp)
{
	struct address_space *mapping = page->mapping;

	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);

	/* Always try to initiate a 'commit' if relevant, but only
	 * wait for it if __GFP_WAIT is set.  Even then, only wait 1
	 * second and only if the 'bdi' is not congested.
	 * Waiting indefinitely can cause deadlocks when the NFS
	 * server is on this machine, when a new TCP connection is
	 * needed and in other rare cases.  There is no particular
	 * need to wait extensively here.  A short wait has the
	 * benefit that someone else can worry about the freezer.
	 */
	if (mapping) {
		struct nfs_server *nfss = NFS_SERVER(mapping->host);
		nfs_commit_inode(mapping->host, 0);
		if ((gfp & __GFP_WAIT) &&
		    !bdi_write_congested(&nfss->backing_dev_info)) {
			wait_on_page_bit_killable_timeout(page, PG_private,
							  HZ);
			if (PagePrivate(page))
				set_bdi_congested(&nfss->backing_dev_info,
						  BLK_RW_ASYNC);
		}
	}
	/* If PagePrivate() is set, then the page is not freeable */
	if (PagePrivate(page))
		return 0;
	return nfs_fscache_release_page(page, gfp);
}
Example #3
0
static void *
bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
{
	struct bts_buffer *buf;
	struct page *page;
	int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	unsigned long offset;
	size_t size = nr_pages << PAGE_SHIFT;
	int pg, nbuf, pad;

	/* count all the high order buffers */
	for (pg = 0, nbuf = 0; pg < nr_pages;) {
		page = virt_to_page(pages[pg]);
		if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
			return NULL;
		pg += 1 << page_private(page);
		nbuf++;
	}

	/*
	 * to avoid interrupts in overwrite mode, only allow one physical buffer
	 */
	if (overwrite && nbuf > 1)
		return NULL;

	buf = kzalloc_node(offsetof(struct bts_buffer, buf[nbuf]), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->nr_pages = nr_pages;
	buf->nr_bufs = nbuf;
	buf->snapshot = overwrite;
	buf->data_pages = pages;
	buf->real_size = size - size % BTS_RECORD_SIZE;

	for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
		unsigned int __nr_pages;

		page = virt_to_page(pages[pg]);
		__nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
		buf->buf[nbuf].page = page;
		buf->buf[nbuf].offset = offset;
		buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
		buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement;
		pad = buf->buf[nbuf].size % BTS_RECORD_SIZE;
		buf->buf[nbuf].size -= pad;

		pg += __nr_pages;
		offset += __nr_pages << PAGE_SHIFT;
	}

	return buf;
}
Example #4
0
/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (PagePrivate(page) && trylock_page(page)) {
			if (PagePrivate(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}
Example #5
0
static int v9fs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return v9fs_fscache_release_page(page, gfp);
}
Example #6
0
/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageSwapBacked(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		page_cache_get(page);
		SetPageSwapCache(page);
		set_page_private(page, entry.val);

		spin_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (likely(!error)) {
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			INC_CACHE_INFO(add_total);
		}
		spin_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();

		if (unlikely(error)) {
			set_page_private(page, 0UL);
			ClearPageSwapCache(page);
			page_cache_release(page);
		}
	}
	return error;
}
Example #7
0
/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;
	int ret;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (PageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		BUG_ON(!PagePrivate(page));
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
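		/*
		 * The snippet is cut off at this point.  What follows is a
		 * hedged reconstruction of how the function typically
		 * continues; the exact code differs between kernel versions.
		 */
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference the snap context in page->private and set PagePrivate so
	 * that the invalidatepage callback fires for this page.
	 */
	BUG_ON(PagePrivate(page));
	page->private = (unsigned long)snapc;
	SetPagePrivate(page);

	ret = __set_page_dirty_nobuffers(page);
	return ret;
}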
Example #8
0
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);

	_enter("{{%x:%u}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
	       gfp_flags);

	/* deny if page is being written to the cache and the caller hasn't
	 * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
	if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
		_leave(" = F [cache busy]");
		return 0;
	}
#endif

	if (PagePrivate(page)) {
		if (wb) {
			set_page_private(page, 0);
			afs_put_writeback(wb);
		}
		ClearPagePrivate(page);
	}

	/* indicate that the page can be released */
	_leave(" = T");
	return 1;
}
Example #9
0
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	lock_page(page);

	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate won't wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page)) {
		unlock_page(page);
		return 1;
	}

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return 0;
}
Example #10
0
/*
 * invalidate part or all of a page
 */
static void afs_file_invalidatepage(struct page *page, unsigned long offset)
{
	int ret = 1;

	_enter("{%lu},%lu", page->index, offset);

	BUG_ON(!PageLocked(page));

	if (PagePrivate(page)) {
#ifdef AFS_CACHING_SUPPORT
		struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
		cachefs_uncache_page(vnode->cache,page);
#endif

		/* We release buffers only if the entire page is being
		 * invalidated.
		 * The get_block cached value has been unconditionally
		 * invalidated, so real IO is not possible anymore.
		 */
		if (offset == 0) {
			BUG_ON(!PageLocked(page));

			ret = 0;
			if (!PageWriteback(page))
				ret = page->mapping->a_ops->releasepage(page,
									0);
			/* possibly should BUG_ON(!ret); - neilb */
		}
	}

	_leave(" = %d", ret);
} /* end afs_file_invalidatepage() */
Example #11
0
static void nfs_check_dirty_writeback(struct page *page,
				bool *dirty, bool *writeback)
{
	struct nfs_inode *nfsi;
	struct address_space *mapping = page_file_mapping(page);

	if (!mapping || PageSwapCache(page))
		return;

	/*
	 * Check if an unstable page is currently being committed and
	 * if so, have the VM treat it as if the page is under writeback
	 * so it will not block due to pages that will shortly be freeable.
	 */
	nfsi = NFS_I(mapping->host);
	if (test_bit(NFS_INO_COMMIT, &nfsi->flags)) {
		*writeback = true;
		return;
	}

	/*
	 * If PagePrivate() is set, then the page is not freeable and as the
	 * inode is not being committed, it's not going to be cleaned in the
	 * near future so treat it as dirty
	 */
	if (PagePrivate(page))
		*dirty = true;
}
Example #12
0
/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
			       gfp_t gfp_mask)
{
	int error;

	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		set_page_no_new_refs(page);
		write_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			SetPageSwapCache(page);
			set_page_private(page, entry.val);
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
		}
		write_unlock_irq(&swapper_space.tree_lock);
		end_page_no_new_refs(page);
		radix_tree_preload_end();
	}
	return error;
}
Example #13
0
/*
 * invalidate part or all of a page
 * - release a page and clean up its private data if offset is 0 (indicating
 *   the entire page)
 */
static void afs_invalidatepage(struct page *page, unsigned long offset)
{
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);

	_enter("{%lu},%lu", page->index, offset);

	BUG_ON(!PageLocked(page));

	/* we clean up only if the entire page is being invalidated */
	if (offset == 0) {
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page)) {
			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
			fscache_wait_on_page_write(vnode->cache, page);
			fscache_uncache_page(vnode->cache, page);
			ClearPageFsCache(page);
		}
#endif

		if (PagePrivate(page)) {
			if (wb && !PageWriteback(page)) {
				set_page_private(page, 0);
				afs_put_writeback(wb);
			}

			if (!page_private(page))
				ClearPagePrivate(page);
		}
	}

	_leave("");
}
Example #14
0
/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page		*req;

	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index	= page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset  = offset;
	req->wb_pgbase	= offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	return req;
}
Example #15
0
/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
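
As the comment notes, this helper is for address_spaces that do not use buffer heads; in practice such filesystems simply point their address_space_operations at it. A minimal wiring sketch (illustrative only, with the unrelated callbacks omitted):

static const struct address_space_operations example_aops = {
	.set_page_dirty	= __set_page_dirty_nobuffers,
	/* .readpage, .writepage, .releasepage, ... omitted */
};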
Example #16
0
/*
 * invalidate part or all of a page
 */
static void afs_invalidatepage(struct page *page, unsigned long offset)
{
	int ret = 1;

	_enter("{%lu},%lu", page->index, offset);

	BUG_ON(!PageLocked(page));

	if (PagePrivate(page)) {
		/* We release buffers only if the entire page is being
		 * invalidated.
		 * The get_block cached value has been unconditionally
		 * invalidated, so real IO is not possible anymore.
		 */
		if (offset == 0) {
			BUG_ON(!PageLocked(page));

			ret = 0;
			if (!PageWriteback(page))
				ret = page->mapping->a_ops->releasepage(page,
									0);
			/* possibly should BUG_ON(!ret); - neilb */
		}
	}

	_leave(" = %d", ret);
}
Example #17
0
/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
void free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct * p;
	struct page *page = NULL;

	if (is_migration_entry(entry))
		return;

	p = swap_info_get(entry);
	if (p) {
		if (swap_entry_free(p, swp_offset(entry)) == 1) {
			page = find_get_page(&swapper_space, entry.val);
			if (page && unlikely(TestSetPageLocked(page))) {
				page_cache_release(page);
				page = NULL;
			}
		}
		spin_unlock(&swap_lock);
	}
	if (page) {
		int one_user;

		BUG_ON(PagePrivate(page));
		one_user = (page_count(page) == 2);
		/* Only cache user (+us), or swap space full? Free it! */
		/* Also recheck PageSwapCache after page is locked (above) */
		if (PageSwapCache(page) && !PageWriteback(page) &&
					(one_user || vm_swap_full())) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
		unlock_page(page);
		page_cache_release(page);
	}
}
Example #18
0
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i, ret = 0;
	bool lazy = false;
	pte_t *pte;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops &&
	    !in_interrupt() &&
	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn, pfn;

		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		pfn = page_to_pfn(pages[i]);

		WARN_ON(PagePrivate(pages[i]));
		SetPagePrivate(pages[i]);
		set_page_private(pages[i], mfn);
		pages[i]->index = pfn_to_mfn(pfn);

		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
			ret = -ENOMEM;
			goto out;
		}

		if (kmap_ops) {
			ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
			if (ret)
				goto out;
		}
	}

out:
	if (lazy)
		arch_leave_lazy_mmu_mode();

	return ret;
}
Example #19
0
/*
 * Attempt to release the private state associated with a page
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 * - Return true (may release page) or false (may not)
 */
static int nfs_release_page(struct page *page, gfp_t gfp)
{
	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);

	/* If PagePrivate() is set, then the page is not freeable */
	if (PagePrivate(page))
		return 0;
	return nfs_fscache_release_page(page, gfp);
}
Example #20
0
void nilfs_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *bh = NULL;

	if (PagePrivate(page)) {
		bh = page_buffers(page);
		BUG_ON(buffer_nilfs_allocated(bh));
	}
	block_invalidatepage(page, offset);
}
Example #21
0
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
			kref_get(&req->wb_kref);
	}
	return req;
}
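
The lookup above only works because the write path stored the request pointer in page_private when the request was attached to the page. A minimal sketch of that companion step, loosely based on the nfs_inode_add_request() pattern (the helper name is invented; locking and reference counting are elided):

/* Sketch only: associate a request with its page so that
 * nfs_page_find_request_locked() can find it later. */
static void nfs_attach_request(struct nfs_page *req, struct page *page)
{
	/* caller is assumed to hold the page lock and the inode lock */
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)req);
}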
Example #22
0
static void print_page(struct page *page)
{
	dprintk("PRINTPAGE page %p\n", page);
	dprintk("	PagePrivate %d\n", PagePrivate(page));
	dprintk("	PageUptodate %d\n", PageUptodate(page));
	dprintk("	PageError %d\n", PageError(page));
	dprintk("	PageDirty %d\n", PageDirty(page));
	dprintk("	PageReferenced %d\n", PageReferenced(page));
	dprintk("	PageLocked %d\n", PageLocked(page));
	dprintk("	PageWriteback %d\n", PageWriteback(page));
	dprintk("	PageMappedToDisk %d\n", PageMappedToDisk(page));
	dprintk("\n");
}
Example #23
0
/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
	free_xenballooned_pages(nr_pages, pages);
}
Example #24
0
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(!PageSwapCache(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(PagePrivate(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}
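
The double-underscore prefix signals that the caller must already hold swapper_space.tree_lock. A hedged sketch of the locked wrapper, roughly following the delete_from_swap_cache() of the same era (details may differ between versions):

void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

	swap_free(entry);
	page_cache_release(page);
}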
Example #25
0
/*
 * Decide whether a read/modify/write cycle may be more efficient
 * than a modify/write/read cycle when writing to a page in the
 * page cache.
 *
 * The modify/write/read cycle may occur if a page is read before
 * being completely filled by the writer.  In this situation, the
 * page must be completely written to stable storage on the server
 * before it can be refilled by reading in the page from the server.
 * This can lead to expensive, small, FILE_SYNC mode writes being
 * done.
 *
 * It may be more efficient to read the page first if the file is
 * open for reading in addition to writing, the page is not marked
 * as Uptodate, it is not dirty or waiting to be committed,
 * indicating that it was previously allocated and then modified,
 * that there were valid bytes of data in that range of the file,
 * and that the new data won't completely replace the old data in
 * that range of the file.
 */
static int nfs_want_read_modify_write(struct file *file, struct page *page,
			loff_t pos, unsigned len)
{
	unsigned int pglen = nfs_page_length(page);
	unsigned int offset = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int end = offset + len;

	if ((file->f_mode & FMODE_READ) &&	/* open for read? */
	    !PageUptodate(page) &&		/* Uptodate? */
	    !PagePrivate(page) &&		/* i/o request already? */
	    pglen &&				/* valid bytes of file? */
	    (end < pglen || offset))		/* replace all valid bytes? */
		return 1;
	return 0;
}
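
For context, a hypothetical caller sketch (not taken from the kernel source above): in a write_begin-style path, the helper's result decides whether to read the page in before allowing a partial write.

/* Hypothetical helper: pre-read the page when a partial write would
 * otherwise force the expensive modify/write/read cycle.  Returns 0 if
 * the caller may proceed with the write, or a negative errno. */
static int nfs_maybe_preread(struct file *file, struct page *page,
			     loff_t pos, unsigned len)
{
	if (!nfs_want_read_modify_write(file, page, pos, len))
		return 0;

	/* nfs_readpage() takes a locked page and unlocks it once the read
	 * completes; the caller must re-grab the page afterwards. */
	return nfs_readpage(file, page);
}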
Example #26
0
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bale out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	clear_page_dirty(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}
Example #27
0
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping;

	lock_page(page);

	mapping = page_mapping(page);
	if (mapping) {
		WARN_ON(!PageUptodate(page));

		/*
		 * At least for ext2 with nobh option, we need to wait on
		 * writeback completing on this page, since we'll remove it
		 * from the pagecache.  Otherwise truncate won't wait on the
		 * page, allowing the disk blocks to be reused by someone else
		 * before we actually wrote our data to them. fs corruption
		 * ensues.
		 */
		wait_on_page_writeback(page);

		if (PagePrivate(page)
		    && try_to_release_page(page, GFP_KERNEL))
			goto out_unlock;

		/*
		 * If we succeeded in removing the mapping, set LRU flag
		 * and return good.
		 */
		if (remove_mapping(mapping, page)) {
			buf->flags |= PIPE_BUF_FLAG_LRU;
			return 0;
		}
	}

	/*
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
	 */
out_unlock:
	unlock_page(page);
	return 1;
}
Example #28
0
/*
 * invalidate part or all of a page
 */
static void afs_file_invalidatepage(struct page *page, unsigned long offset)
{
	_enter("{%lu},%lu", page->index, offset);

	BUG_ON(!PageLocked(page));

	if (PagePrivate(page)) {
		/* We release buffers only if the entire page is being
		 * invalidated.
		 * The get_block cached value has been unconditionally
		 * invalidated, so real IO is not possible anymore.
		 */
		if (offset == 0 && !PageWriteback(page))
			page->mapping->a_ops->releasepage(page, 0);
	}

	_leave("");

} /* end afs_file_invalidatepage() */
Example #29
0
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_map_grant_ref *kmap_ops,
			      struct page **pages, unsigned int count)
{
	int i, ret = 0;
	bool lazy = false;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops &&
	    !in_interrupt() &&
	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
		unsigned long pfn = page_to_pfn(pages[i]);

		if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
			ret = -EINVAL;
			goto out;
		}

		set_page_private(pages[i], INVALID_P2M_ENTRY);
		WARN_ON(!PagePrivate(pages[i]));
		ClearPagePrivate(pages[i]);
		set_phys_to_machine(pfn, pages[i]->index);

		if (kmap_ops)
			ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
		if (ret)
			goto out;
	}

out:
	if (lazy)
		arch_leave_lazy_mmu_mode();
	return ret;
}
Example #30
0
/*
 * Work out if there are any other processes sharing this
 * swap cache page. Free it if you can. Return success.
 */
int remove_exclusive_swap_page(struct page *page)
{
	int retval;
	struct swap_info_struct * p;
	swp_entry_t entry;

	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));

	if (!PageSwapCache(page))
		return 0;
	if (PageWriteback(page))
		return 0;
	if (page_count(page) != 2) /* 2: us + cache */
		return 0;

	entry.val = page_private(page);
	p = swap_info_get(entry);
	if (!p)
		return 0;

	/* Is the only swap cache user the cache itself? */
	retval = 0;
	if (p->swap_map[swp_offset(entry)] == 1) {
		/* Recheck the page count with the swapcache lock held.. */
		write_lock_irq(&swapper_space.tree_lock);
		if ((page_count(page) == 2) && !PageWriteback(page)) {
			__delete_from_swap_cache(page);
			SetPageDirty(page);
			retval = 1;
		}
		write_unlock_irq(&swapper_space.tree_lock);
	}
	spin_unlock(&swap_lock);

	if (retval) {
		swap_free(entry);
		page_cache_release(page);
	}

	return retval;
}