Example #1
void ub_io_release_context(struct page *page, size_t wrote)
{
	struct page_beancounter *pb;

	if (io_debug_precheck_release(page))
		return;

	if (unlikely(in_interrupt())) {
		WARN_ON_ONCE(1);
		return;
	}

	spin_lock(&pb_lock);
	pb = iopb_to_pb(page_pbc(page));
	if (unlikely(pb == NULL))
		/*
		 * this may happen if we failed to allocate
		 * context in ub_io_save_context or raced with it
		 */
		goto out_unlock;

	if (wrote)
		pb->ub->bytes_wrote += wrote;

	put_page_io(page, pb);
out_unlock:
	spin_unlock(&pb_lock);

	if (pb != NULL) {
		put_beancounter(pb->ub);
		io_pb_free(pb);
	}
}
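
A minimal sketch of a plausible call site, assuming the context is released once writeback of the page completes; the hook name my_end_page_writeback() is hypothetical, only ub_io_release_context() and PAGE_CACHE_SIZE come from the examples in this listing.

/*
 * Hypothetical end-of-writeback hook: once the page's dirty data has
 * hit the disk, charge the written bytes and drop the IO context that
 * ub_io_save_context() attached when the page was dirtied.
 */
static void my_end_page_writeback(struct page *page)
{
	ub_io_release_context(page, PAGE_CACHE_SIZE);
	end_page_writeback(page);
}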
Example #2
void ub_io_release_debug(struct page *page)
{
	struct page_beancounter *pb;
	static int once = 0;

	pb = page_pbc(page);
	if (likely(iopb_to_pb(pb) == NULL))
		return;

	if (!once) {
		printk("BUG: Page has an IO bc but is not expectd to\n");
		dump_stack();
		once = 1;
	}

	spin_lock(&pb_lock);
	not_released++;
	pb = iopb_to_pb(pb);
	page_pbc(page) = NULL;
	io_debug_release(pb);
	pb->ub->io_pb_held--;
	spin_unlock(&pb_lock);

	put_beancounter(pb->ub);
	io_pb_free(pb);
}
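
A hedged sketch of where such a debug check could sit, assuming it runs just before a page leaves the page cache for good; my_free_cached_page() is an assumed wrapper, not part of the example.

/*
 * Hypothetical release path: before dropping the last reference to a
 * cached page, verify that no IO beancounter is still attached to it.
 * ub_io_release_debug() warns once and cleans up any leaked context.
 */
static void my_free_cached_page(struct page *page)
{
	ub_io_release_debug(page);
	put_page(page);
}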
Example #3
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;
	struct user_beancounter *ub;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	ub = pb_grab_page_ub(page);
	if (IS_ERR(ub))
		return 0;

	entry = get_swap_page(ub);
	put_beancounter(ub);
	if (!entry.val)
		return 0;

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}
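
The usual calling pattern, sketched under the assumption of a reclaim-style loop that already holds the page lock; try_to_swap_out_page() is a hypothetical name, while the PageAnon()/PageSwapCache() checks mirror the standard reclaim path.

/*
 * Hypothetical reclaim step: anonymous pages that are not yet in the
 * swap cache need a swap slot before they can be written out. The
 * caller must hold the page lock, per add_to_swap()'s contract above.
 */
static int try_to_swap_out_page(struct page *page)
{
	if (PageAnon(page) && !PageSwapCache(page)) {
		if (!add_to_swap(page))
			return 0;	/* no swap slot, or swap cache -ENOMEM */
	}
	/* page is now dirty and in the swap cache; writeback may proceed */
	return 1;
}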
Example #4
void ub_io_save_context(struct page *page, size_t bytes_dirtied)
{
	struct user_beancounter *ub;
	struct page_beancounter *pb, *mapped_pb, *io_pb;

	if (unlikely(in_interrupt())) {
		WARN_ON_ONCE(1);
		return;
	}

	/*
	 * FIXME - this can happen from atomic context and
	 * it's probably not a good idea to lose some requests
	 */

	pb = io_pb_alloc();
	io_pb = NULL;

	spin_lock(&pb_lock);
	if (io_debug_precheck_save(page))
		goto out_unlock;

	mapped_pb = page_pbc(page);
	io_pb = iopb_to_pb(mapped_pb);
	if (io_pb != NULL) {
		/*
		 * this page has an IO - release it and force a new one
		 * We could also race with page cleaning - see below
		 */
		mapped_pb = io_pb->page_pb_list;
		put_page_io(page, io_pb);
	}

	/*
	 * If the page is mapped we must save the context
	 * it maps to. If the page isn't mapped we use current
	 * context as this is a regular write.
	 */

	if (mapped_pb != NULL)
		ub = top_beancounter(mapped_pb->ub);
	else
		ub = get_io_ub();

	if (!PageDirty(page)) {
		/*
		 * race with clear_page_dirty(_for_io) - account
		 * writes for ub_io_release_context()
		 */
		if (io_pb != NULL)
			io_pb->ub->bytes_wrote += PAGE_CACHE_SIZE;
		if (pb != NULL)
			io_pb_free(pb);
		goto out_unlock;
	}

	if (pb == NULL) {
		ub->bytes_dirty_missed += bytes_dirtied;
		goto out_unlock;
	}

	/*
	 * the page may become clean here, but the context will be seen
	 * in ub_io_release_context()
	 */

	pb->ub = get_beancounter(ub);
	pb->page_pb_list = mapped_pb;
	ub->bytes_dirtied += bytes_dirtied;

	set_page_io(page, pb, mapped_pb);

out_unlock:
	spin_unlock(&pb_lock);

	if (io_pb != NULL) {
		put_beancounter(io_pb->ub);
		io_pb_free(io_pb);
	}
}
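
Taken together with Example #1, the intended lifecycle pairs one save with one release. A sketch of that pairing, assuming hooks on the dirtying and write-completion paths; both hook names are hypothetical, and PAGE_CACHE_SIZE is used as in the code above.

/*
 * Hypothetical pairing: a page picks up an IO context when it is
 * dirtied (charged to bytes_dirtied) and drops it once the write
 * completes (charged to bytes_wrote).
 */
static void my_page_dirtied(struct page *page)
{
	ub_io_save_context(page, PAGE_CACHE_SIZE);
}

static void my_page_written(struct page *page, size_t written)
{
	ub_io_release_context(page, written);
}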