/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time. We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SWAP_ENABLE_READAHEAD
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		page_cache_release(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
#endif
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
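The cluster arithmetic above is worth making concrete. The following is a minimal userspace sketch of the same mask/start/end computation; the fixed page_cluster window is an assumption here, since the kernel's swapin_nr_pages() actually scales the window with the recent readahead hit rate:

/*
 * Illustration only: the offset-clustering arithmetic from
 * swapin_readahead(), modelled with a fixed (1 << page_cluster)
 * window (an assumption; the kernel adapts the window size).
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_cluster = 3;		/* assumed: 8-entry cluster */
	unsigned long offset = 0x1234;		/* faulting swap offset */
	unsigned long mask = (1UL << page_cluster) - 1;

	unsigned long start = offset & ~mask;	/* align down to cluster */
	unsigned long end   = offset | mask;	/* last entry in cluster */

	if (!start)				/* slot 0 is the swap header */
		start++;

	/* Prints: cluster [0x1230, 0x1237] around fault at 0x1234 */
	printf("cluster [0x%lx, 0x%lx] around fault at 0x%lx\n",
	       start, end, offset);
	return 0;
}

Because start is aligned down and end is filled up with the low bits, the faulting entry always lands inside the window, so the 'original' request is queued together with the readahead ones, exactly as the comment block above promises.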
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time. We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;
	bool do_poll = true;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr, false);
		if (!page)
			continue;
		if (offset != entry_offset &&
		    likely(!PageTransCompound(page)))
			SetPageReadahead(page);
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}
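For orientation, here is a hedged sketch of how a swap fault path of this kernel era might call the function. fault_in_swap_page() is a hypothetical helper invented for this illustration; the real do_swap_page() additionally checks the swap cache first, updates readahead statistics, and handles errors:

/*
 * Hedged usage sketch (not upstream code): roughly how a fault
 * handler could drive swapin_readahead(). Swap-cache lookup,
 * locking order and error handling are all omitted.
 */
static struct page *fault_in_swap_page(struct vm_fault *vmf, swp_entry_t entry)
{
	struct page *page;

	/*
	 * Queue readahead around the faulting entry and get the target
	 * page itself. do_poll stays true inside swapin_readahead()
	 * when no readahead window was built, so the lone read for the
	 * faulting page may be polled for completion.
	 */
	page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
				vmf->vma, vmf->address);
	if (!page)
		return NULL;	/* caller retries or fails the fault */

	lock_page(page);	/* serialise against concurrent swapin */
	return page;
}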
int mpage_readpages_compressed(struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages,
			get_block_t get_block)
{
	struct bio *bio = NULL;
	struct inode *inode = mapping->host;
	unsigned page_idx, count, nr_to_read;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	struct compressed_bio *cb;
	struct page *page;
	loff_t isize = i_size_read(inode);
	unsigned long prev_index = 0;
	unsigned long end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	struct list_head *list;

	/*
	 * Walk the request list so that prev_index ends up holding the
	 * highest index among the pages we were asked to read.
	 */
	list = pages->prev;
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		page = list_entry(list, struct page, lru);
		prev_index = page->index;
		list = list->prev;
	}

	if (prev_index == end_index || nr_pages >= COMPRESSION_STRIDE_LEN)
		goto again;

	/*
	 * Start readahead (see mm/readahead.c): pad the request out to a
	 * full compression stride so whole strides are read together.
	 */
	prev_index++;
	nr_to_read = COMPRESSION_STRIDE_LEN - nr_pages;
	printk(KERN_INFO "Start readahead for %u pages\n", nr_to_read);
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = prev_index + page_idx;

		if (page_offset > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page)	/* already cached; nothing to read ahead */
			continue;

		page = page_cache_alloc_readahead(mapping);
		if (!page) {
			printk(KERN_INFO "Page readahead failed\n");
			break;
		}
		page->index = page_offset;
		list_add(&page->lru, pages);
		if (page_idx == nr_to_read - 1)	/* mark the final readahead page */
			SetPageReadahead(page);
		nr_pages++;
	}

again:
	cb = NULL;
	map_bh.b_state = 0;
	map_bh.b_size = 0;

	printk(KERN_INFO "==> IN MPAGE_READPAGES | nr_pages : %u\n", nr_pages);
	count = min_t(unsigned, nr_pages, COMPRESSION_STRIDE_LEN);
	for (page_idx = 0; page_idx < count; page_idx++) {
		if (list_empty(pages))
			break;
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping, page->index,
					GFP_KERNEL)) {
			/*
			 * first_logical_block: first logical block of the
			 * extent; last_block_in_bio advances to the last
			 * physical block of the bio.
			 */
			printk(KERN_INFO "IN DO_MPAGE_READPAGE\n");
			bio = do_mpage_readpage(bio, page, nr_pages - page_idx,
					&last_block_in_bio, &map_bh,
					&first_logical_block, &cb, get_block);
			BUG_ON(!cb);
			printk(KERN_INFO "OUT DO_MPAGE_READPAGE\n");
		}
		page_cache_release(page);
	}
	printk(KERN_INFO "==> OUT MPAGE_READPAGES | first_logical : %lu\n",
			first_logical_block);

	if (!cb)	/* no page made it into the page cache this stride */
		goto next_stride;

	/* Create and submit a bio for the compressed read. */
	for (page_idx = 0; page_idx < cb->nr_pages; page_idx++) {
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		BUG_ON(!page);	/* no recovery path for a failed allocation */
		page->mapping = NULL;
		page->index = cb->start + page_idx;
		cb->compressed_pages[page_idx] = page;

		/* Try to add the page to the existing bio. */
		if (!bio || !bio_add_page(bio, page, PAGE_CACHE_SIZE, 0)) {
			/* Couldn't add: submit the old bio, allocate a new one. */
			if (bio)
				bio = mpage_bio_submit(READ, bio);
			bio = mpage_alloc(map_bh.b_bdev,
					(map_bh.b_blocknr + page_idx) <<
						(cb->inode->i_blkbits - 9),
					min_t(int, cb->nr_pages - page_idx,
						bio_get_nr_vecs(map_bh.b_bdev)),
					GFP_NOFS);
			bio->bi_private = cb;
			/* A freshly allocated bio must accept at least one page. */
			if (!bio_add_page(bio, page, PAGE_CACHE_SIZE, 0))
				BUG();
		}
	}
	if (bio)
		bio = mpage_bio_submit(READ, bio);

next_stride:
	nr_pages -= count;
	if (nr_pages > 0)
		goto again;

	BUG_ON(!list_empty(pages));
	return 0;
}
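The function above depends on a compressed_bio descriptor whose definition is not shown. Purely as a sketch inferred from the fields the code touches (cb->nr_pages, cb->start, cb->compressed_pages, cb->inode, and bio->bi_private = cb), a minimal layout might look like the following; the project's actual structure almost certainly carries more state, such as the uncompressed page list, a completion or refcount for the end_io path, and the on-disk extent length:

/*
 * Sketch only: a minimal compressed_bio inferred from the fields
 * mpage_readpages_compressed() uses. Not the project's definition.
 */
struct compressed_bio {
	struct inode *inode;		/* file this compressed extent belongs to */
	pgoff_t start;			/* index of the first page of the extent;
					 * cb->start + page_idx becomes page->index */
	unsigned int nr_pages;		/* number of compressed pages to read */
	struct page **compressed_pages;	/* destination pages for the raw
					 * compressed data */
};

Because the bio's bi_private points at cb, the read-completion handler can locate every compressed page and the target extent without any extra bookkeeping, which is presumably where decompression into the page-cache pages takes place.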