static int lfs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	sector_t iblock, block;
	unsigned int blocksize;
	struct buffer_head *bh, *head;

	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);
	bh = head;
	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	do {
		struct buffer_head *bh_temp;

		block = lfs_disk_block(inode, iblock);
		dprintk("searching for block %Lu in segments: ",
			(long long unsigned int)block);
		bh_temp = lfs_read_block(inode, iblock);
		if (bh_temp) {
			dprintk("FOUND\n");
			memcpy(bh->b_data, bh_temp->b_data, LFS_BSIZE);
			set_buffer_uptodate(bh);
			brelse(bh_temp);
		} else
			dprintk("NOT FOUND\n");
	} while (iblock++, (bh = bh->b_this_page) != head);

	return block_read_full_page(page, lfs_map_block);
}
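/*
 * block_read_full_page() drives the get_block_t callback handed to it (here
 * lfs_map_block). A minimal sketch of that contract follows; myfs_get_block
 * and myfs_find_block are hypothetical names, not part of the code above.
 */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	sector_t phys = myfs_find_block(inode, iblock); /* hypothetical lookup */

	if (phys)
		map_bh(bh_result, inode->i_sb, phys); /* fill b_bdev/b_blocknr */
	/* if left unmapped, block_read_full_page() zero-fills the hole */
	return 0;
}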
static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));

	ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem) == 0) {
		ret = AOP_TRUNCATED_PAGE;
		goto out_meta_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock. We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here. Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		zero_user_page(page, 0, PAGE_SIZE, KM_USER0);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	ret = ocfs2_data_lock_with_page(inode, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out_alloc;
	}

	ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

	ocfs2_data_unlock(inode, 0);
out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_meta_unlock:
	ocfs2_meta_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	mlog_exit(ret);
	return ret;
}
static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
			     (page ? page->index : 0));

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
		/*
		 * Unlock the page and cycle ip_alloc_sem so that we don't
		 * busyloop waiting for ip_alloc_sem to unlock.
		 */
		ret = AOP_TRUNCATED_PAGE;
		unlock_page(page);
		unlock = 0;
		down_read(&oi->ip_alloc_sem);
		up_read(&oi->ip_alloc_sem);
		goto out_inode_unlock;
	}

	if (start >= i_size_read(inode)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		ret = ocfs2_readpage_inline(inode, page);
	else
		ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

out_alloc:
	up_read(&oi->ip_alloc_sem);
out_inode_unlock:
	ocfs2_inode_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	return ret;
}
static int minix_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, minix_get_block);
}
static int hfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfs_get_block);
}
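/*
 * For context: thin wrappers like the two above are hooked into the VFS
 * through the inode's address_space_operations. A minimal sketch for the
 * pre-folio kernels these snippets come from; the myfs_* names are
 * hypothetical, and real tables also set writepage, write_begin, etc.
 */
static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,	/* hypothetical wrapper as above */
	.writepage	= myfs_writepage,	/* hypothetical */
};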
int ext4_mpage_readpages(struct address_space *mapping,
			 struct list_head *pages, struct page *page,
			 unsigned nr_pages, bool is_readahead)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		prefetchw(&page->flags);
		if (pages) {
			page = lru_to_page(pages);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping, page->index,
				  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
							relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0, PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct fscrypt_ctx *ctx = NULL;

			if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
				ctx = fscrypt_get_ctx(inode, GFP_NOFS);
				if (IS_ERR(ctx))
					goto set_error_page;
			}
			bio = bio_alloc(GFP_KERNEL,
					min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					fscrypt_release_ctx(ctx);
				goto set_error_page;
			}
			bio_set_dev(bio, bdev);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = ctx;
			bio_set_op_attrs(bio, REQ_OP_READ,
					 is_readahead ? REQ_RAHEAD : 0);
		}

		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(bio);
	return 0;
}
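/*
 * For context, roughly how ext4 of the same era (~4.19) drives the routine
 * above from its ->readpages hook. A sketch from memory, not verbatim from
 * this tree.
 */
static int ext4_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;

	/* If the file has inline data, no need to do readpages. */
	if (ext4_has_inline_data(inode))
		return 0;

	return ext4_mpage_readpages(mapping, pages, NULL, nr_pages, true);
}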
/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructs largest possible bios, submits them for IO if the
 * blocks are not contiguous on the disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, struct buffer_head *map_bh,
		unsigned long *first_logical_block, get_block_t get_block,
		gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	unsigned nblocks;
	unsigned relative_block;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = block_in_file + nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
			block_in_file < (*first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - *first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			*first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
	    cleancache_get_page(page) == 0) {
		SetPageUptodate(page);
		goto confused;
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(READ, bio);

alloc_new:
	if (bio == NULL) {
		if (first_hole == blocks_per_page) {
			if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
								page))
				goto out;
		}
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
		if (bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(READ, bio);
		goto alloc_new;
	}

	relative_block = block_in_file - *first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		bio = mpage_bio_submit(READ, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

confused:
	if (bio)
		bio = mpage_bio_submit(READ, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}
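/*
 * For context, roughly how the worker above is driven for a single page in
 * fs/mpage.c of the same era (a sketch from memory, not verbatim from this
 * tree). The caller owns the bio and map_bh across calls so that reads over
 * several pages can be batched into one bio.
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
			&map_bh, &first_logical_block, get_block, gfp);
	if (bio)
		mpage_bio_submit(READ, bio);
	return 0;
}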
static int blkdev_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, blkdev_get_block);
}
/**
 * vxfs_readpage - read one page synchronously into the pagecache
 * @file:	file context (unused)
 * @page:	page frame to fill in.
 *
 * Description:
 *   The vxfs_readpage routine reads @page synchronously into the
 *   pagecache.
 *
 * Returns:
 *   Zero on success, else a negative error code.
 *
 * Locking status:
 *   @page is locked and will be unlocked.
 */
static int vxfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, vxfs_getblk);
}
int tarfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, tarfs_get_block);
}
/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructs largest possible bios, submits them for IO if the
 * blocks are not contiguous on the disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, struct buffer_head *map_bh,
		unsigned long *first_logical_block,
		struct compressed_bio **cb, get_block_t get_block)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; /* set to 1 */
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;		/* increments to 1 */
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int fully_mapped = 1;
	unsigned nblocks;
	unsigned relative_block;
	int err;

	/* blkbits = 12 | MAX_BUF_PER_PAGE = 8 */
	if (page_has_buffers(page))
		goto confused;

	/*
	 * block_in_file      : page->index
	 * last_block         : last page->index of requested nr_pages
	 * last_block_in_file : always index of last_page_of_file
	 */
	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
	last_block = block_in_file + nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 * nblocks : initially 0 | later mapped to 1 extent, so is mostly 16.
	 */
	nblocks = map_bh->b_size >> blkbits;
	printk(KERN_INFO "\ncurrent_page : %Lu | nblocks_initial : %u",
	       (unsigned long long)block_in_file, nblocks);
	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
			block_in_file < (*first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - *first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block - block_in_file) << blkbits;
			/*
			 * get_block (the BLOCK_MAPPER) fills in map_bh:
			 *   bdev           = map_bh->b_bdev
			 *   physical       = map_bh->b_blocknr
			 *   nblocks        = map_bh->b_size (number of logical
			 *                    blocks in the extent)
			 *   compress_count = map_bh->b_private
			 *   first_logical_block
			 */
			if (get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			*first_logical_block = block_in_file;
		}

		/* generally is mapped, so false */
		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		/* next 3 checks => false in the traced run */
		if (buffer_uptodate(map_bh)) {
			printk("\nIn map_buffer_to_page()");
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		printk(KERN_INFO "\nnblocks_mapped : %u", nblocks);

		/* MAIN PART: set up the compressed_bio for this extent */
		if (!*cb) {
			*cb = kzalloc(sizeof(struct compressed_bio), GFP_NOFS);
			if (!*cb)
				goto confused;	/* -ENOMEM: fall back below */
			err = compressed_bio_init(*cb, inode,
					*first_logical_block,
					*(unsigned *)map_bh->b_private,
					nblocks << PAGE_CACHE_SHIFT,
					0 /* compressed_len */);
			if (err) {
				kfree(*cb);
				*cb = NULL;
				goto confused;
			}
			kfree((unsigned *)map_bh->b_private);
			map_bh->b_private = NULL;
		}

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {	/* true... required? */
		SetPageMappedToDisk(page);
	}

	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
	    cleancache_get_page(page) == 0) {	/* false */
		printk(KERN_INFO "\nSet_Page_Uptodate");
		SetPageUptodate(page);
		goto confused;
	}

	/*
	 * This page will go to BIO. The stock bio construction and submission
	 * code from fs/mpage.c is disabled in this variant:
	 */
	/* if (bio && (*last_block_in_bio != blocks[0] - 1)) */
	/*	bio = mpage_bio_submit(READ, bio); */
	/* alloc_new: */
	/* if (bio == NULL) { */
	/*	bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), */
	/*			min_t(int, nr_pages, bio_get_nr_vecs(bdev)), */
	/*			GFP_KERNEL); */
	/*	if (bio == NULL) */
	/*		goto confused; */
	/* } */
	/* length = first_hole << blkbits; */
	/* if (bio_add_page(bio, page, length, 0) < length) { */
	/*	bio = mpage_bio_submit(READ, bio); */
	/*	goto alloc_new; */
	/* } */
	/* relative_block = block_in_file - *first_logical_block; */
	/* nblocks = map_bh->b_size >> blkbits; */
	/* if ((buffer_boundary(map_bh) && relative_block == nblocks) || */
	/*     (first_hole != blocks_per_page)) */
	/*	bio = mpage_bio_submit(READ, bio); */
	/* else */
	/*	*last_block_in_bio = blocks[blocks_per_page - 1]; */

out:
	return bio;

confused:
	printk(KERN_INFO "\nCONFUSED !");
	if (bio)
		bio = mpage_bio_submit(READ, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}
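/*
 * The compressed_bio type and compressed_bio_init() are not shown in this
 * collection. A purely hypothetical sketch of the state they appear to
 * carry, inferred only from the comments and the call above; every field
 * name here is an assumption, not a real API.
 */
struct compressed_bio {
	struct inode	*inode;		  /* file being read (assumed) */
	sector_t	start;		  /* *first_logical_block of the extent */
	unsigned	compress_count;	  /* taken from map_bh->b_private */
	unsigned	uncompressed_len; /* nblocks << PAGE_CACHE_SHIFT */
	unsigned	compressed_len;	  /* 0 at init; filled in later */
};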
int ux_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, ux_get_block);
}