/* * support function for mpage_readpages. The fs supplied get_block might * return an up to date buffer. This is used to map that buffer into * the page, which allows readpage to avoid triggering a duplicate call * to get_block. * * The idea is to avoid adding buffers to pages that don't already have * them. So when the buffer is up to date and the page size == block size, * this marks the page up to date instead of adding new buffers. */ static void map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) { struct inode *inode = page->mapping->host; struct buffer_head *page_bh, *head; int block = 0; if (!page_has_buffers(page)) { /* * don't make any buffers if there is only one buffer on * the page and the page just needs to be set up to date */ if (inode->i_blkbits == PAGE_CACHE_SHIFT && buffer_uptodate(bh)) { SetPageUptodate(page); return; } create_empty_buffers(page, 1 << inode->i_blkbits, 0); } head = page_buffers(page); page_bh = head; do { if (block == page_block) { page_bh->b_state = bh->b_state; page_bh->b_bdev = bh->b_bdev; page_bh->b_blocknr = bh->b_blocknr; break; } page_bh = page_bh->b_this_page; block++; } while (page_bh != head); }
/*
 * lfs_readpage - ->readpage implementation: pre-fill page buffers from
 * in-memory segments before falling back to the generic read path.
 *
 * For every block covered by the page, lfs_read_block() is asked whether
 * the block currently resides in a segment; if so, its data is copied
 * into the corresponding page buffer and the buffer is marked up to date,
 * so block_read_full_page() will skip the disk read for it.
 *
 * NOTE(review): the memcpy assumes the inode block size equals LFS_BSIZE;
 * confirm the filesystem never uses a different block size.
 */
static int lfs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	sector_t iblock, block;
	unsigned int blocksize;
	struct buffer_head *bh, *head;

	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);
	bh = head;
	/* First file block covered by this page. */
	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	do {
		struct buffer_head *bh_temp;
		block = lfs_disk_block(inode, iblock);
		dprintk("searching for block %Lu in segments: ", (long long unsigned int)block);
		bh_temp = lfs_read_block(inode, iblock);
		if(bh_temp) {
			dprintk("FOUND\n");
			/* Segment-resident copy wins; no disk read needed. */
			memcpy(bh->b_data, bh_temp->b_data, LFS_BSIZE);
			set_buffer_uptodate(bh);
			brelse(bh_temp);
		}
		else
			dprintk("NOT FOUND\n");
	} while (iblock++, (bh = bh->b_this_page) != head);
	/* Buffers still not up to date are read via lfs_map_block. */
	return block_read_full_page(page, lfs_map_block);
}
/**
 * prepare the blocks and map them
 * @param inode		inode owning the page
 * @param page		page whose buffers are prepared
 * @param from		start offset within page (inclusive)
 * @param to		last offset within page (inclusive)
 * @param get_block	get_block function
 * @return		0 on success, negative errno on failure
 *
 * Walks every buffer of the page, maps/allocates the blocks that overlap
 * [from, to], zeroes the tail of freshly allocated blocks past @to, and
 * reads any not-up-to-date buffer that is only partially covered by the
 * write so its untouched bytes stay valid.
 */
int rfs_block_prepare_write(struct inode * inode, struct page * page,
		unsigned from, unsigned to, get_block_t *get_block)
{
	struct buffer_head *bh, *head;
	unsigned long block;
	unsigned block_start, block_end, blocksize, bbits;
	int err = 0;
	char *kaddr = kmap(page);

	bbits = inode->i_blkbits;
	blocksize = 1 << bbits;

	if (!page->buffers)
		create_empty_buffers(page, inode->i_dev, blocksize);
	head = page->buffers;

	block = page->index << (PAGE_CACHE_SHIFT - bbits);	/* start block # */

	/* we allocate buffers and map them */
	for (bh = head, block_start = 0;
	     bh != head || !block_start;
	     block++, block_start = block_end + 1, bh = bh->b_this_page) {
		if (!bh) {
			err = -EIO;
			RFS_BUG("can't get buffer head\n");
			goto out;
		}
		block_end = block_start + blocksize - 1;
		if (block_end < from) {
			continue;
		} else if (block_start > to) {
			break;
		}
		clear_bit(BH_New, &bh->b_state);

		/* map new buffer if necessary */
		if (!buffer_mapped(bh) ||
		    (inode->i_size <= (block << (inode->i_sb->s_blocksize_bits)))) {
			err = get_block(inode, block, bh, 1);
			if (err) {
				DEBUG(DL1, "no block\n");
				goto out;
			}
			/* zero the tail of a fresh block beyond @to */
			if (buffer_new(bh) && block_end > to) {
				memset(kaddr + to + 1, 0, block_end - to);
				continue;
			}
		}
		/* read a partially-covered, not-up-to-date buffer */
		if (!buffer_uptodate(bh) &&
		    (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh)) {
				err = -EIO;
				goto out;
			}
		}
	}
out:
	flush_dcache_page(page);
	/*
	 * FIX: the page was mapped with kmap(), so it must be released with
	 * kunmap().  The previous kunmap_atomic(kaddr, KM_USER0) paired an
	 * atomic unmap with a non-atomic mapping, corrupting the highmem
	 * kmap accounting.
	 */
	kunmap(page);
	return err;
}
/*
 * Unstuff inline dinode data into a page: copy the inline bytes out of
 * the dinode buffer, zero the rest of the page, and attach a mapped,
 * up-to-date buffer pointing at the newly allocated data block.
 *
 * Returns 0 on success or -ENOMEM if page 0 could not be grabbed.
 */
static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct inode *inode = &ip->i_inode;
	struct buffer_head *bh;
	int we_grabbed = 0;

	/* The inline data lives at file offset 0: work on page 0. */
	if (!page || page->index) {
		page = grab_cache_page(inode->i_mapping, 0);
		if (!page)
			return -ENOMEM;
		we_grabbed = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);
		u64 bytes = i_size_read(inode);
		u64 avail = dibh->b_size - sizeof(struct gfs2_dinode);

		if (bytes > avail)
			bytes = avail;
		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), bytes);
		memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
		kunmap(page);
		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits,
				     (1 << BH_Uptodate));

	bh = page_buffers(page);
	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);
	set_buffer_uptodate(bh);

	if (!gfs2_is_jdata(ip))
		mark_buffer_dirty(bh);
	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);

	if (we_grabbed) {
		unlock_page(page);
		page_cache_release(page);
	}
	return 0;
}
/*
 * gfs2_writepage - write a dirty page back to disk.
 *
 * For ordered-data or jdata inodes a transaction is opened around the
 * write so the data buffers can be added to it; otherwise the page goes
 * straight through block_write_full_page().  Pages wholly beyond i_size
 * (a truncate in progress) are invalidated instead of written.
 *
 * Returns 0 or the error from block_write_full_page(); -EIO if the glock
 * assertion fails.  In all return paths the page ends up unlocked.
 */
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int error;
	int done_trans = 0;

	/* The inode glock must be held exclusively here. */
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
		unlock_page(page);
		return -EIO;
	}
	/* Recursing from inside a transaction: just redirty and bail. */
	if (current->journal_info)
		goto out_ignore;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/* Ordered/journaled data: wrap the write in a transaction. */
	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) {
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (error)
			goto out_ignore;
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
		done_trans = 1;
	}
	error = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	gfs2_meta_cache_flush(ip);
	return error;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); if (PageChecked(page)) { ClearPageChecked(page); if (!page_has_buffers(page)) { create_empty_buffers(page, inode->i_sb->s_blocksize, (1 << BH_Dirty)|(1 << BH_Uptodate)); } gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1); } return block_write_full_page(page, gfs2_get_block_noalloc, wbc); }
static int gfs2_ordered_writepage(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); int ret; ret = gfs2_writepage_common(page, wbc); if (ret <= 0) return ret; if (!page_has_buffers(page)) { create_empty_buffers(page, inode->i_sb->s_blocksize, (1 << BH_Dirty)|(1 << BH_Uptodate)); } gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1); return block_write_full_page(page, gfs2_get_block_noalloc, wbc); }
/*
 * Return the buffer head covering @block within @page, creating empty
 * buffers on the page first when needed.  The buffer is touched and
 * waited on before being returned.
 */
static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
		       int blkbits, unsigned long b_state)
{
	struct buffer_head *bh;
	unsigned long page_first;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, b_state);

	/* Block number of the first buffer on this page. */
	page_first = (unsigned long)index << (PAGE_SHIFT - blkbits);
	bh = nilfs_page_get_nth_block(page, block - page_first);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}
/*
 * Look up (or, when @create is set, create) the buffer head for block
 * @blkno in the glock's metadata address space.  Returns the buffer with
 * an extra reference held, mapped to @blkno if it wasn't already, or
 * NULL when !create and the page is absent.
 */
struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gl->gl_aspace->i_mapping;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	unsigned long index = blkno >> shift;		/* page holding the block */
	unsigned int slot = blkno - (index << shift);	/* buffer slot in page */

	if (create) {
		/* Keep retrying until the page cache yields the page. */
		while (!(page = grab_cache_page(mapping, index)))
			yield();
	} else {
		page = find_lock_page(mapping, index);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

	/* Advance to the slot'th buffer on the page. */
	bh = page_buffers(page);
	while (slot--)
		bh = bh->b_this_page;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	mark_page_accessed(page);
	page_cache_release(page);

	return bh;
}
/**
 * nilfs_copy_page -- copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * should be treated by caller.  The page must not be under i/o.
 * Both src and dst page must be locked
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = sbufs = page_buffers(src);
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	/*
	 * First pass: lock each src/dst buffer pair, then copy the
	 * inheritable state bits and disk location across.  The buffers
	 * stay locked until after the page data is copied below.
	 */
	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	/* Mirror src's page-level uptodate/mapped flags onto dst. */
	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

	/* Second pass: release the buffer locks taken above. */
	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}
/*
 * Debug helper: verify every buffer on @page is mapped, complaining via
 * dprintk() for any that is not.  If the page unexpectedly has no
 * buffers, they are created and mapped to consecutive blocks starting
 * at @block.
 */
static inline void print_buffers(struct page *page, sector_t block)
{
	struct buffer_head *cur, *first;

	if (!page_has_buffers(page)) {
		printk("Warning: page doesn't have buffers, not sure how that happened, allocating buffer !!!\n");
		create_empty_buffers(page, LFS_BSIZE, 0);
		first = page_buffers(page);
		cur = first;
		do {
			map_bh(cur, page->mapping->host->i_sb, block++);
			cur = cur->b_this_page;
		} while (cur != first);
	}

	first = page_buffers(page);
	cur = first;
	do {
		if (!buffer_mapped(cur))
			dprintk("The buffer seems to be not mapped ??");
		cur = cur->b_this_page;
	} while (cur != first);
}
/*
 * tux3_page_mkwrite - ->page_mkwrite handler: make a faulted page
 * writable by forking it for the current delta and dirtying the fork.
 *
 * Runs under sb_start_pagefault() and the inode's truncate_lock (read).
 * If the page fork loses a race (-EAGAIN) the whole sequence is retried
 * with the page re-looked-up by index.  Returns a VM_FAULT_* code, or 0
 * on success with vmf->page replaced by the forked page.
 */
static int tux3_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct sb *sb = tux_sb(inode->i_sb);
	struct page *clone, *page = vmf->page;
	void *ptr;
	int ret;

	sb_start_pagefault(inode->i_sb);

retry:
	down_read(&tux_inode(inode)->truncate_lock);
	lock_page(page);
	/* Page was truncated or invalidated while we waited for the lock. */
	if (page->mapping != mapping(inode)) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * page fault can be happened while holding change_begin/end()
	 * (e.g. copy of user data between ->write_begin and
	 * ->write_end for write(2)).
	 *
	 * So, we use nested version here.
	 */
	change_begin_atomic_nested(sb, &ptr);

	/*
	 * FIXME: Caller releases vmf->page (old_page) unconditionally.
	 * So, this takes additional refcount to workaround it.
	 */
	if (vmf->page == page)
		page_cache_get(page);

	clone = pagefork_for_blockdirty(page, tux3_get_current_delta());
	if (IS_ERR(clone)) {
		/* Someone did page fork */
		pgoff_t index = page->index;

		/* Unwind in reverse order of acquisition before retrying. */
		change_end_atomic_nested(sb, ptr);
		unlock_page(page);
		page_cache_release(page);
		up_read(&tux_inode(inode)->truncate_lock);

		switch (PTR_ERR(clone)) {
		case -EAGAIN:
			/* Lost the race: re-find the page and start over. */
			page = find_get_page(inode->i_mapping, index);
			assert(page);
			goto retry;
		case -ENOMEM:
			ret = VM_FAULT_OOM;
			break;
		default:
			ret = VM_FAULT_SIGBUS;
			break;
		}

		goto out;
	}

	file_update_time(vma->vm_file);

	/* Assign buffers to dirty */
	if (!page_has_buffers(clone))
		create_empty_buffers(clone, sb->blocksize, 0);

	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	tux3_set_page_dirty(clone);
#if 1
	/* FIXME: Caller doesn't see the changed vmf->page */
	vmf->page = clone;

	change_end_atomic_nested(sb, ptr);
	/* FIXME: caller doesn't know about pagefork */
	unlock_page(clone);
	page_cache_release(clone);

	ret = 0; // ret = VM_FAULT_LOCKED;
#endif
out:
	up_read(&tux_inode(inode)->truncate_lock);
	sb_end_pagefault(inode->i_sb);

	return ret;
}