Example #1
static void nfs_invalidate_page(struct page *page, unsigned long offset)
{
	if (offset != 0)
		return;
	/* Cancel any unstarted writes on this page */
	nfs_wb_page_priority(page->mapping->host, page, FLUSH_INVALIDATE);
}
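
For context, an ->invalidatepage handler such as this one only takes effect once it is wired into the file's address_space_operations. The table below is an illustrative sketch and not part of this excerpt; the neighbouring entries are assumptions about how such a table is typically filled in.

/*
 * Illustrative sketch only -- not part of this excerpt.  It shows where
 * an ->invalidatepage handler like nfs_invalidate_page() is typically
 * registered; the other entries are assumptions added for context.
 */
static const struct address_space_operations example_nfs_file_aops = {
	.readpage	= nfs_readpage,
	.writepage	= nfs_writepage,
	.invalidatepage	= nfs_invalidate_page,	/* Example #1 above */
	.releasepage	= nfs_release_page,
};
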
Example #2
/* Flush the whole mapping with a single WB_SYNC_ALL pass */
static int nfs_write_mapping(struct address_space *mapping, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return __nfs_write_mapping(mapping, &wbc, how);
}
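
__nfs_write_mapping() itself is not part of this excerpt. Judging from how the caller above sets up the writeback_control and from the helpers that do appear below (nfs_sync_mapping_wait(), __mark_inode_dirty()), a plausible sketch of its job is: start writeback over the requested range, wait for the resulting requests, and redirty the inode on failure.

/*
 * Plausible sketch only -- the real __nfs_write_mapping() is not shown
 * in this excerpt, so treat this as an assumption about its behaviour.
 */
static int __nfs_write_mapping(struct address_space *mapping,
			       struct writeback_control *wbc, int how)
{
	int ret;

	ret = nfs_writepages(mapping, wbc);
	if (ret < 0)
		goto out_mark_dirty;
	ret = nfs_sync_mapping_wait(mapping, wbc, how);
	if (ret < 0)
		goto out_mark_dirty;
	return 0;
out_mark_dirty:
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return ret;
}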

/*
 * Flush all of the inode's dirty pages to disk (write and commit).
 */
int nfs_wb_all(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, 0);
}

int nfs_wb_nocommit(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
}
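
As a usage illustration (not taken from this excerpt), a caller that wants the server to see every cached write before it trusts server-reported attributes could flush first. example_flush_and_revalidate() is a made-up name; NFS_SERVER() and nfs_revalidate_inode() are assumed to be the usual helpers from the same tree.

/*
 * Illustrative only -- example_flush_and_revalidate() is a made-up
 * helper.  Write out and commit all cached writes for the inode, then
 * revalidate its attributes against the server.
 */
static int example_flush_and_revalidate(struct inode *inode)
{
	int error;

	error = nfs_wb_all(inode);		/* write + commit everything */
	if (error < 0)
		return error;
	return nfs_revalidate_inode(NFS_SERVER(inode), inode);
}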

int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret = 0;

	BUG_ON(!PageLocked(page));
	for (;;) {
		req = nfs_page_find_request(page);
		if (req == NULL)
			goto out;
		if (test_bit(PG_CLEAN, &req->wb_flags)) {
			nfs_release_request(req);
			break;
		}
		if (nfs_lock_request_dontget(req)) {
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_request(req);
			break;
		}
		ret = nfs_wait_on_request(req);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
	return ret;
}
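
nfs_wb_page_cancel() expects the caller to hold the page lock (note the BUG_ON above), which matches how the VM calls ->invalidatepage. The sketch below shows an invalidatepage hook built on it; whether a given tree is wired this way is an assumption (compare Example #1, which calls nfs_wb_page_priority() instead).

/*
 * Illustrative sketch only: an ->invalidatepage hook built on
 * nfs_wb_page_cancel().  The VM invokes ->invalidatepage with the page
 * locked, satisfying the BUG_ON(!PageLocked(page)) above.
 */
static void example_invalidate_page(struct page *page, unsigned long offset)
{
	if (offset != 0)
		return;		/* partial truncation: keep pending writes */
	/* The whole page is being invalidated: deal with its requests */
	nfs_wb_page_cancel(page->mapping->host, page);
}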

static int nfs_wb_page_priority(struct inode *inode, struct page *page,
				int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	do {
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
		} else if (!PagePrivate(page))
			break;
		ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
		if (ret < 0)
			goto out_error;
	} while (PagePrivate(page));
	return 0;
out_error:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}
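
As the comment says, this is meant to run before a page is read. An illustrative caller is sketched below; example_readpage() and example_issue_read() are made-up names standing in for a ->readpage implementation and its read path.

/*
 * Illustrative sketch only -- example_readpage()/example_issue_read()
 * are made-up helpers.  Any write requests attached to the page are
 * flushed to the server before the READ is issued, so the read cannot
 * return stale data.
 */
static int example_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error;

	error = nfs_wb_page(inode, page);	/* flush this page first */
	if (error) {
		unlock_page(page);		/* readpage unlocks on error */
		return error;
	}
	return example_issue_read(file, page);	/* made-up read path */
}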

#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page)
{
	struct nfs_page *req;
	int ret;

	if (PageFsCache(page))
		nfs_fscache_release_page(page, GFP_KERNEL);

	req = nfs_find_and_lock_request(page);
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	ret = migrate_page(mapping, newpage, page);
	if (!req)
		goto out;
	if (ret)
		goto out_unlock;
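	/*
	 * The write request survives the migration: move the page reference
	 * held via req->wb_page and the nfs_page pointer stored in
	 * page_private() from the old page to the new one.
	 */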
	page_cache_get(newpage);
	req->wb_page = newpage;
	SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
out_unlock:
	nfs_clear_page_tag_locked(req);
	nfs_release_request(req);
out:
	return ret;
}
#endif

int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;
}
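
The scaling table in the comment above can be reproduced outside the kernel. Below is a minimal stand-alone sketch of the same arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12); the small differences from the table (e.g. 11584k instead of 11585k) come from taking integer square roots.

/*
 * User-space illustration of the nfs_congestion_kb formula above:
 *   nfs_congestion_kb = (16 * int_sqrt(totalram_pages)) << (PAGE_SHIFT - 10)
 * capped at 256 MB.  Assumes 4 KiB pages; this is not kernel code.
 */
#include <stdio.h>

static unsigned long int_sqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	const unsigned long page_shift = 12;	/* assumed 4 KiB pages */
	const unsigned long mem_mb[] = { 64, 128, 256, 512, 1024, 2048,
					 4096, 8192, 16384 };

	for (unsigned int i = 0; i < sizeof(mem_mb) / sizeof(mem_mb[0]); i++) {
		unsigned long pages = mem_mb[i] << (20 - page_shift);
		unsigned long kb = (16 * int_sqrt(pages)) << (page_shift - 10);

		if (kb > 256 * 1024)	/* same 256 MB cap as above */
			kb = 256 * 1024;
		printf("%6luMB: %8luk\n", mem_mb[i], kb);
	}
	return 0;
}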

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}