Example #1
/*
 * Attempt to release the private state associated with a page
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 * - Return true (may release page) or false (may not)
 */
static int nfs_release_page(struct page *page, gfp_t gfp)
{
	struct address_space *mapping = page->mapping;

	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);

	/* Always try to initiate a 'commit' if relevant, but only
	 * wait for it if __GFP_WAIT is set.  Even then, only wait 1
	 * second and only if the 'bdi' is not congested.
	 * Waiting indefinitely can cause deadlocks when the NFS
	 * server is on this machine, when a new TCP connection is
	 * needed and in other rare cases.  There is no particular
	 * need to wait extensively here.  A short wait has the
	 * benefit that someone else can worry about the freezer.
	 */
	if (mapping) {
		struct nfs_server *nfss = NFS_SERVER(mapping->host);
		nfs_commit_inode(mapping->host, 0);
		if ((gfp & __GFP_WAIT) &&
		    !bdi_write_congested(&nfss->backing_dev_info)) {
			wait_on_page_bit_killable_timeout(page, PG_private,
							  HZ);
			if (PagePrivate(page))
				set_bdi_congested(&nfss->backing_dev_info,
						  BLK_RW_ASYNC);
		}
	}
	/* If PagePrivate() is set, then the page is not freeable */
	if (PagePrivate(page))
		return 0;
	return nfs_fscache_release_page(page, gfp);
}
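
For context: nfs_release_page() is not called directly by NFS code. It is installed as the ->releasepage callback in the NFS address_space_operations, so the VM reaches it through try_to_release_page() when it tries to reclaim a page that still carries private state. The sketch below shows only the members relevant to this section; the remaining fields of the real fs/nfs/file.c definition are elided.

const struct address_space_operations nfs_file_aops = {
	/* ... read/write/dirty callbacks elided ... */
	.releasepage	= nfs_release_page,	/* the function shown above */
	.migratepage	= nfs_migrate_page,	/* see Example #4 below */
	/* ... */
};
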
Example #2
/*
 * Attempt to release the private state associated with a page
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 * - Return true (may release page) or false (may not)
 */
static int nfs_release_page(struct page *page, gfp_t gfp)
{
	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);

	/* If PagePrivate() is set, then the page is not freeable */
	if (PagePrivate(page))
		return 0;
	return nfs_fscache_release_page(page, gfp);
}
Example #3
/*
 * Attempt to release the private state associated with a page
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 * - Return true (may release page) or false (may not)
 */
static int nfs_release_page(struct page *page, gfp_t gfp)
{
	struct address_space *mapping = page->mapping;

	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);

	/* Only do I/O if gfp is a superset of GFP_KERNEL */
	if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
		int how = FLUSH_SYNC;

		/* Don't let kswapd deadlock waiting for OOM RPC calls */
		if (current_is_kswapd())
			how = 0;
		nfs_commit_inode(mapping->host, how);
	}
	/* If PagePrivate() is set, then the page is not freeable */
	if (PagePrivate(page))
		return 0;
	return nfs_fscache_release_page(page, gfp);
}
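
The gfp test above relies on GFP_KERNEL being a plain bitmask. In kernels of this vintage GFP_KERNEL expands to (__GFP_WAIT | __GFP_IO | __GFP_FS), so requiring gfp to contain all of GFP_KERNEL means the caller may sleep, may start block I/O, and may re-enter filesystem code, which is what makes the synchronous commit safe. The fragment below is an illustration of the check, not part of the source:

/* (gfp & GFP_KERNEL) == GFP_KERNEL is effectively: */
if ((gfp & __GFP_WAIT) && (gfp & __GFP_IO) && (gfp & __GFP_FS)) {
	/* allowed to sleep and to recurse into I/O and filesystem code */
}
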
Example #4
/* Flush the whole mapping to disk in a single WB_SYNC_ALL pass */
static int nfs_write_mapping(struct address_space *mapping, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return __nfs_write_mapping(mapping, &wbc, how);
}
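
/*
 * __nfs_write_mapping() is not included in this excerpt.  In kernels of
 * this era it roughly wrote out the dirty pages and then waited for (and,
 * unless FLUSH_NOCOMMIT was passed, committed) the resulting requests.
 * The sketch below is a reconstruction from the surrounding calls, not
 * the verbatim source.
 */
static int __nfs_write_mapping(struct address_space *mapping,
			       struct writeback_control *wbc, int how)
{
	int ret;

	ret = nfs_writepages(mapping, wbc);		/* push dirty pages */
	if (ret < 0)
		goto out;
	ret = nfs_sync_mapping_wait(mapping, wbc, how);	/* wait, then commit */
	if (ret < 0)
		goto out;
	return 0;
out:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}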

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, 0);
}

int nfs_wb_nocommit(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
}

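/*
 * Invalidation path: discard any write request still attached to the page.
 * A clean request is simply released; otherwise the request is removed
 * from the inode once it can be locked (waiting and retrying if someone
 * else holds it), and any writeback still pending on the page is flushed
 * and invalidated.
 */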
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret = 0;

	BUG_ON(!PageLocked(page));
	for (;;) {
		req = nfs_page_find_request(page);
		if (req == NULL)
			goto out;
		if (test_bit(PG_CLEAN, &req->wb_flags)) {
			nfs_release_request(req);
			break;
		}
		if (nfs_lock_request_dontget(req)) {
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_request(req);
			break;
		}
		ret = nfs_wait_on_request(req);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
	return ret;
}

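/*
 * Write out and wait for every request on a single page: keep writing
 * the page while it is dirty and waiting on its requests until the page
 * no longer carries private state.  On error the inode is marked dirty
 * again so the data is not lost.
 */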
static int nfs_wb_page_priority(struct inode *inode, struct page *page,
				int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	do {
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
		} else if (!PagePrivate(page))
			break;
		ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
		if (ret < 0)
			goto out_error;
	} while (PagePrivate(page));
	return 0;
out_error:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page* page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}

#ifdef CONFIG_MIGRATION
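/*
 * Migrate an NFS page: look up and lock any outstanding nfs_page request,
 * let the generic migrate_page() move the page contents, and then transfer
 * the request's page reference and the PG_private state from the old page
 * to the new one.
 */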
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page)
{
	struct nfs_page *req;
	int ret;

	if (PageFsCache(page))
		nfs_fscache_release_page(page, GFP_KERNEL);

	req = nfs_find_and_lock_request(page);
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	ret = migrate_page(mapping, newpage, page);
	if (!req)
		goto out;
	if (ret)
		goto out_unlock;
	page_cache_get(newpage);
	req->wb_page = newpage;
	SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
out_unlock:
	nfs_clear_page_tag_locked(req);
	nfs_release_request(req);
out:
	return ret;
}
#endif

int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
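	/*
	 * Worked example (illustrative, not from the original comment):
	 * with 256MB of RAM and 4K pages, totalram_pages == 65536, so
	 * int_sqrt(65536) == 256 and (16 * 256) << (PAGE_SHIFT - 10)
	 * == 4096 << 2 == 16384k, matching the table above.
	 */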
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}