示例#1
0
文件: mpage.c 项目: Chong-Li/cse522
/**
 * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 * 	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is to allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
	unsigned i;

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	/*
	 * Pages arrive with the lowest file offset at pages->prev; consume
	 * from the tail so reads are issued in ascending offset order.
	 */
	for (i = 0; i < nr_pages; i++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		/* Only read pages we actually managed to insert into the cache. */
		if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
			bio = do_mpage_readpage(bio, page, nr_pages - i,
					&last_block_in_bio, &map_bh,
					&first_logical_block, get_block, gfp);
		/* Drop the reference taken by the caller's page allocation. */
		page_cache_release(page);
	}
	BUG_ON(!list_empty(pages));
	/* Flush any BIO still being assembled. */
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
示例#2
0
文件: mpage.c 项目: uarka/linux-next
/*
 * This isn't called much at all
 */
/*
 * Single-page variant of the mpage read path; rarely used.
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct buffer_head map_bh;
	sector_t last_block_in_bio = 0;
	unsigned long first_logical_block = 0;
	struct bio *bio;

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	/* One page, so pass nr_pages == 1 and start with no BIO. */
	bio = do_mpage_readpage(NULL, page, 1, &last_block_in_bio,
			&map_bh, &first_logical_block, get_block);
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}
示例#3
0
文件: mpage.c 项目: Vhacker1995/linux
/*
 * This isn't called much at all
 */
/*
 * Single-page variant of the mpage read path; rarely used.
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
    struct buffer_head map_bh;
    sector_t last_block_in_bio = 0;
    unsigned long first_logical_block = 0;
    gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
    struct bio *bio;

    map_bh.b_state = 0;
    map_bh.b_size = 0;
    /* One page, so pass nr_pages == 1 and start with no BIO. */
    bio = do_mpage_readpage(NULL, page, 1, &last_block_in_bio,
                            &map_bh, &first_logical_block, get_block, gfp);
    if (bio)
        mpage_bio_submit(REQ_OP_READ, 0, bio);
    return 0;
}
/**
 * mpage_readpages_compressed - read pages that belong to compressed extents
 * @mapping: the address_space
 * @pages: list of target pages; @pages->prev has the lowest file offset
 * @nr_pages: number of pages on @pages
 * @get_block: the filesystem's block mapper function
 *
 * Pads the request up to COMPRESSION_STRIDE_LEN pages with readahead pages,
 * runs the normal mpage read machinery to build a compressed_bio describing
 * the extent, then allocates temporary pages for the on-disk (compressed)
 * data and submits BIOs for them.
 *
 * Returns 0.
 */
int
mpage_readpages_compressed(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	struct inode *inode = mapping->host;
	unsigned page_idx, count, nr_to_read;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	struct compressed_bio *cb;
	struct page *page;

	loff_t isize = i_size_read(inode);
	unsigned long prev_index = 0, end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
	struct list_head *list;

	/*
	 * Walk from the tail (lowest offset) to the head so prev_index ends
	 * up holding the highest ->index currently on the list.
	 */
	list = pages->prev;
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		page = list_entry(list, struct page, lru);
		prev_index = page->index;
		list = list->prev;
	}
	/* Nothing to pad if we already hit EOF or a full stride. */
	if (prev_index == end_index || nr_pages >= COMPRESSION_STRIDE_LEN)
		goto again;

	/* Start Readahead : mm/readahead.c */
	prev_index++;
	nr_to_read = COMPRESSION_STRIDE_LEN - nr_pages;
	printk(KERN_INFO "Start Readahead for %u pages", nr_to_read);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = prev_index + page_idx;

		if (page_offset > end_index)
			break;

		/* Skip offsets that are already present in the page cache. */
		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page)
			continue;

		page = page_cache_alloc_readahead(mapping);
		if (!page) {
			printk(KERN_INFO "Page Readahead Failed");
			break;
		}
		page->index = page_offset;
		list_add(&page->lru, pages);
		/*
		 * Mark the last readahead page.  The original test
		 * (page_idx == nr_to_read) could never be true inside this
		 * loop, leaving SetPageReadahead dead code.
		 */
		if (page_idx == nr_to_read - 1)
			SetPageReadahead(page);
		nr_pages++;
	}

again:
	cb = NULL;
	map_bh.b_state = 0;
	map_bh.b_size = 0;
	printk(KERN_INFO "\n\n==> IN MPAGE_READPAGES | nr_pages : %u", nr_pages);
	count = min_t(unsigned, nr_pages, COMPRESSION_STRIDE_LEN);
	for (page_idx = 0; page_idx < count; page_idx++) {
		/*
		 * list_empty() must be given the list head itself; the
		 * original passed pages->prev (a member node), which is
		 * never "empty" and so never terminated the loop early.
		 */
		if (list_empty(pages))
			break;

		page = list_entry(pages->prev, struct page, lru);
		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {

			/* first_logical   : first_logical_block_of_extent
			 * last_blk_in_bio : increments to last physical of bio
			 */
			printk(KERN_INFO "\n IN DO_MPAGE_READPAGE");
			bio = do_mpage_readpage(bio, page,
						nr_pages - page_idx,
						&last_block_in_bio, &map_bh,
						&first_logical_block, &cb,
						get_block);
			assert(cb);
			printk(KERN_INFO "\n OUT DO_MPAGE_READPAGE");
		}
		page_cache_release(page);
	}
	printk(KERN_INFO "\n\n==>OUT MPAGE_READPAGES | first_logical : %lu",first_logical_block);

	/*
	 * Create and submit bio for compressed_read.  cb stays NULL when
	 * every page was already in the page cache (do_mpage_readpage never
	 * ran), so it must be checked before dereferencing.
	 */
	if (cb) {
		for (page_idx = 0; page_idx < cb->nr_pages; page_idx++) {
			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page) {
				/* Out of memory: submit what we have built so far. */
				printk(KERN_INFO "Compressed page alloc failed");
				break;
			}
			page->mapping = NULL;
			page->index = cb->start + page_idx;
			cb->compressed_pages[page_idx] = page;

			/* Try to add pages to existing bio */
			if (!bio || !bio_add_page(bio, page, PAGE_CACHE_SIZE, 0)) {
				/* Couldn't add. So submit old bio and allocate new bio */
				if (bio)
					bio = mpage_bio_submit(READ, bio);

				bio = mpage_alloc(map_bh.b_bdev, (map_bh.b_blocknr + page_idx) << (cb->inode->i_blkbits - 9),
						  min_t(int, cb->nr_pages - page_idx, bio_get_nr_vecs(map_bh.b_bdev)),
						  GFP_NOFS);
				bio->bi_private = cb;

				/* A fresh bio always has room for at least one page. */
				if (!bio_add_page(bio, page, PAGE_CACHE_SIZE, 0))
					assert(0);
			}
		}
	}

	if (bio)
		bio = mpage_bio_submit(READ, bio);

	nr_pages -= count;
	if (nr_pages > 0)
		goto again;

	BUG_ON(!list_empty(pages));
	return 0;
}