/*
 * support function for mpage_readpages.  The fs supplied get_block might
 * return an up to date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void map_buffer_to_page(struct page *page, struct buffer_head *bh,
			       int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
	}

	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}
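/*
 * Usage sketch (not the actual fs/mpage.c call site): after get_block fills
 * a throwaway buffer_head, an already-uptodate result is copied into the
 * page's buffer ring instead of being read again.  example_readpage_step()
 * and its return convention are hypothetical, for illustration only;
 * map_buffer_to_page() is the helper above.
 */
static int example_readpage_step(struct inode *inode, struct page *page,
				 sector_t iblock, int page_block,
				 get_block_t *get_block)
{
	struct buffer_head map_bh;

	map_bh.b_state = 0;
	map_bh.b_size = 1 << inode->i_blkbits;
	if (get_block(inode, iblock, &map_bh, 0))
		return -EIO;
	if (buffer_uptodate(&map_bh)) {
		/* no duplicate get_block: reuse the mapping we already have */
		map_buffer_to_page(page, &map_bh, page_block);
		return 1;	/* caller falls back to block_read_full_page() */
	}
	return 0;		/* caller keeps building its own bio */
}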
/*
 * If a ramdisk page has buffers, some may be uptodate and some may be not.
 * To bring the page uptodate we zero out the non-uptodate buffers.  The
 * page must be locked.
 */
static void make_page_uptodate(struct page *page)
{
	if (page_has_buffers(page)) {
		struct buffer_head *bh = page_buffers(page);
		struct buffer_head *head = bh;

		do {
			if (!buffer_uptodate(bh)) {
				memset(bh->b_data, 0, bh->b_size);
				/*
				 * akpm: I'm totally undecided about this.  The
				 * buffer has just been magically brought "up to
				 * date", but nobody should want to be reading
				 * it anyway, because it hasn't been used for
				 * anything yet.  It is still in a "not read
				 * from disk yet" state.
				 *
				 * But non-uptodate buffers against an uptodate
				 * page are against the rules.  So do it anyway.
				 */
				set_buffer_uptodate(bh);
			}
		} while ((bh = bh->b_this_page) != head);
	} else {
		memset(page_address(page), 0, PAGE_CACHE_SIZE);
	}
	flush_dcache_page(page);
	SetPageUptodate(page);
}
void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       page, page_ref_count(page),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}
void jbd2_journal_invalidatepage(journal_t *journal,
				 struct page *page,
				 unsigned long offset)
{
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;
	int may_free = 1;

	if (!PageLocked(page))
		BUG();
	if (!page_has_buffers(page))
		return;

	head = bh = page_buffers(page);
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		if (offset <= curr_off) {
			lock_buffer(bh);
			may_free &= journal_unmap_buffer(journal, bh);
			unlock_buffer(bh);
		}
		curr_off = next_off;
		bh = next;

	} while (bh != head);

	if (!offset) {
		if (may_free && try_to_free_buffers(page))
			J_ASSERT(!page_has_buffers(page));
	}
}
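/*
 * Worked example of the offset arithmetic above (hypothetical numbers):
 * with a 4096-byte page and 1024-byte buffers, invalidating from offset
 * 2048 visits curr_off = 0, 1024, 2048, 3072.  Only the buffers at 2048
 * and 3072 satisfy offset <= curr_off and are unmapped; the first two may
 * hold dirty data before the truncation point and are left alone.  Only a
 * full-page invalidate (offset == 0) may free the page's buffers outright.
 */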
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_CACHE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}
static void unmap_buffers(struct page *page, loff_t pos)
{
	struct buffer_head *bh;
	struct buffer_head *head;
	struct buffer_head *next;
	unsigned long tail_index;
	unsigned long cur_index;

	if (page) {
		if (page_has_buffers(page)) {
			tail_index = pos & (PAGE_CACHE_SIZE - 1);
			cur_index = 0;
			head = page_buffers(page);
			bh = head;
			do {
				next = bh->b_this_page;

				/*
				 * we want to unmap the buffers that contain
				 * the tail, and all the buffers after it
				 * (since the tail must be at the end of the
				 * file).  We don't want to unmap file data
				 * before the tail, since it might be dirty
				 * and waiting to reach disk
				 */
				cur_index += bh->b_size;
				if (cur_index > tail_index)
					reiserfs_unmap_buffer(bh);
				bh = next;
			} while (bh != head);

			if (PAGE_SIZE == bh->b_size)
				clear_page_dirty(page);
		}
	}
}
/**
 * jbd2_journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: to try and free
 * @gfp_mask: we use the mask to detect how hard we should try to release
 * buffers.  If __GFP_WAIT and __GFP_FS is set, we wait for commit code to
 * release the buffers.
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called.  We do this if the page is releasable by
 * try_to_free_buffers().  We also do it if the page has locked or dirty
 * buffers and the caller wants us to perform sync or async writeout.
 *
 * This complicates JBD locking somewhat.  We aren't protected by the
 * BKL here.  We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a jbd2_journal_dirty_data on this
 * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
 *
 * Who else is affected by this?  hmm...  Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 *
 * Return 0 on failure, 1 on success
 */
int jbd2_journal_try_to_free_buffers(journal_t *journal,
				     struct page *page, gfp_t gfp_mask)
{
	struct buffer_head *head;
	struct buffer_head *bh;
	int ret = 0;

	J_ASSERT(PageLocked(page));

	head = page_buffers(page);
	bh = head;
	do {
		struct journal_head *jh;

		jh = jbd2_journal_grab_journal_head(bh);
		if (!jh)
			continue;

		jbd_lock_bh_state(bh);
		__journal_try_to_free_buffer(journal, bh);
		jbd2_journal_put_journal_head(jh);
		jbd_unlock_bh_state(bh);
		if (buffer_jbd(bh))
			goto busy;
	} while ((bh = bh->b_this_page) != head);

	ret = try_to_free_buffers(page);
busy:
	return ret;
}
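/*
 * A minimal sketch of how a ->releasepage implementation wires this in,
 * loosely modelled on ext4_releasepage().  The details (the PageChecked
 * test, the EXT4_JOURNAL() lookup, the function name) are assumptions
 * about the caller, not part of the function above.
 */
static int example_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXT4_JOURNAL(page->mapping->host);

	/* page has dirty journalled data: cannot release it yet */
	if (PageChecked(page))
		return 0;
	if (journal)
		return jbd2_journal_try_to_free_buffers(journal, page, wait);
	return try_to_free_buffers(page);
}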
static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		struct inode *inode = page->mapping->host;
		unsigned nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * This page is locked by callers, and no other thread
		 * concurrently marks its buffers dirty since they are
		 * only dirtied through routines in fs/buffer.c in
		 * which call sites of mark_buffer_dirty are protected
		 * by page lock.
		 */
		bh = head = page_buffers(page);
		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}
/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
 * buffer in the IO.  Once it does this, it is unsafe to access the bufferhead
 * or the page at all, as we may be racing with memory reclaim and it can free
 * both the bufferhead chain and the page as it will see the page as clean and
 * unused.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	unsigned int		end = bvec->bv_offset + bvec->bv_len - 1;
	struct buffer_head	*head, *bh, *next;
	unsigned int		off = 0;
	unsigned int		bsize;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
	ASSERT(end < PAGE_SIZE);
	ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0);

	bh = head = page_buffers(bvec->bv_page);

	bsize = bh->b_size;
	do {
		/* grab the next buffer before b_end_io can free the chain */
		next = bh->b_this_page;
		if (off < bvec->bv_offset)
			goto next_bh;
		if (off > end)
			break;
		bh->b_end_io(bh, !error);
next_bh:
		off += bsize;
	} while ((bh = next) != head);
}
static void ext4_finish_bio(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		struct page *data_page = NULL;
#endif
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;
		unsigned long flags;

		if (!page)
			continue;

#ifdef CONFIG_EXT4_FS_ENCRYPTION
		if (!page->mapping) {
			/* The bounce data pages are unmapped. */
			data_page = page;
			fscrypt_pullback_bio_page(&page, false);
		}
#endif

		if (bio->bi_status) {
			SetPageError(page);
			mapping_set_error(page->mapping, -EIO);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under BH_Uptodate_Lock
		 * to avoid races with other end io clearing async_write flags
		 */
		local_irq_save(flags);
		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_status)
				buffer_io_error(bh);
		} while ((bh = bh->b_this_page) != head);
		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
		local_irq_restore(flags);
		if (!under_io) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
			if (data_page)
				fscrypt_restore_control_page(data_page);
#endif
			end_page_writeback(page);
		}
	}
}
void nilfs_clear_dirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			ClearPageUptodate(page);
			ClearPageMappedToDisk(page);
			bh = head = page_buffers(page);
			do {
				lock_buffer(bh);
				clear_buffer_dirty(bh);
				clear_buffer_nilfs_volatile(bh);
				clear_buffer_uptodate(bh);
				clear_buffer_mapped(bh);
				unlock_buffer(bh);
				bh = bh->b_this_page;
			} while (bh != head);

			__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
///////////////////////////////////////////////////////////
// TracePageBuffers
//
//
///////////////////////////////////////////////////////////
void
TracePageBuffers(
    IN struct page* page,
    IN int hdr
    )
{
  if ( hdr ) {
    DebugTrace(+1, UFSD_LEVEL_PAGE_BH, ("p=%p f=%lx:\n", page, page->flags ));
  } else if ( UFSD_TraceLevel & UFSD_LEVEL_PAGE_BH ) {
    UFSD_TraceInc( +1 );
  }

  if ( page_has_buffers( page ) ) {
    struct buffer_head* head = page_buffers(page);
    struct buffer_head* bh   = head;
    do {
      if ( (sector_t)-1 == bh->b_blocknr ) {
        DebugTrace( 0, UFSD_LEVEL_PAGE_BH, ("bh=%p,%lx\n", bh, bh->b_state) );
      } else {
        DebugTrace( 0, UFSD_LEVEL_PAGE_BH, ("bh=%p,%lx,%"PSCT"x\n", bh, bh->b_state, bh->b_blocknr ) );
      }
      bh = bh->b_this_page;
    } while( bh != head );
  } else {
    DebugTrace(0, UFSD_LEVEL_PAGE_BH, ("no buffers\n" ));
  }

  if ( UFSD_TraceLevel & UFSD_LEVEL_PAGE_BH )
    UFSD_TraceInc( -1 );
}
handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
				      struct page *page,
				      unsigned from,
				      unsigned to)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle;
	int ret = 0;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	if (ocfs2_should_order_data(inode)) {
		ret = walk_page_buffers(handle,
					page_buffers(page),
					from, to, NULL,
					ocfs2_journal_dirty_data);
		if (ret < 0)
			mlog_errno(ret);
	}
out:
	if (ret) {
		if (!IS_ERR(handle))
			ocfs2_commit_trans(osb, handle);
		handle = ERR_PTR(ret);
	}
	return handle;
}
/*
 * write out a page to a file
 */
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct buffer_head *bh;

	if (bitmap->file == NULL) {
		switch (write_sb_page(bitmap, page, wait)) {
		case -EINVAL:
			bitmap->flags |= BITMAP_WRITE_ERROR;
		}
	} else {
		/*
		 * The bitmap's buffer chain is NULL-terminated (built by
		 * alloc_page_buffers()), not the usual circular ring, so
		 * a plain while loop walks it.
		 */
		bh = page_buffers(page);

		while (bh && bh->b_blocknr) {
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(WRITE, bh);
			bh = bh->b_this_page;
		}

		if (wait)
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes) == 0);
	}
	if (bitmap->flags & BITMAP_WRITE_ERROR)
		bitmap_file_kick(bitmap);
}
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	ClearPageError(page);

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}
static int gfs2_aspace_writepage(struct page *page,
				 struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	int write_op = REQ_META |
		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE);

	BUG_ON(!PageLocked(page));
	BUG_ON(!page_has_buffers(page));

	head = page_buffers(page);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from pdflush and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(write_op, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
}
static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct nilfs_transaction_info ti;
	int ret;

	if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
		return VM_FAULT_SIGBUS;

	lock_page(page);
	if (page->mapping != inode->i_mapping ||
	    page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}

	if (PageMappedToDisk(page))
		goto mapped;

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int fully_mapped = 1;

		bh = head = page_buffers(page);
		do {
			if (!buffer_mapped(bh)) {
				fully_mapped = 0;
				break;
			}
		} while (bh = bh->b_this_page, bh != head);

		if (fully_mapped) {
			SetPageMappedToDisk(page);
			goto mapped;
		}
	}
	unlock_page(page);

	ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
	if (unlikely(ret))
		return VM_FAULT_SIGBUS;

	ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
	if (ret != VM_FAULT_LOCKED) {
		nilfs_transaction_abort(inode->i_sb);
		return ret;
	}
	nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
	nilfs_transaction_commit(inode->i_sb);

mapped:
	wait_on_page_writeback(page);
	return VM_FAULT_LOCKED;
}
/**
 * nilfs_copy_page -- copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * must be handled by the caller.  The page must not be under i/o.
 * Both src and dst page must be locked.
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = sbufs = page_buffers(src);
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}
void nilfs_invalidatepage(struct page *page, unsigned long offset)
{
	struct buffer_head *bh = NULL;

	if (PagePrivate(page)) {
		bh = page_buffers(page);
		BUG_ON(buffer_nilfs_allocated(bh));
	}
	block_invalidatepage(page, offset);
}
/**
 * nilfs_page_buffers_clean - check if a page has dirty buffers or not.
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns a non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return 0;
		bh = bh->b_this_page;
	} while (bh != head);
	return 1;
}
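/*
 * Typical use (a sketch in the spirit of nilfs_forget_buffer(), not the
 * verbatim source): once a single buffer's dirty bit is cleared, the
 * page-level dirty state can only be dropped if no sibling buffer in the
 * ring is still dirty.  __nilfs_clear_page_dirty() is the helper used in
 * nilfs_clear_dirty_pages() elsewhere in this collection.
 */
static void example_clear_buffer_dirty(struct buffer_head *bh)
{
	struct page *page = bh->b_page;

	lock_page(page);
	clear_buffer_dirty(bh);
	if (nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);
	unlock_page(page);
}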
static void free_buffers(struct page *page)
{
	struct buffer_head *bh = page_buffers(page);

	/* the chain is NULL-terminated, not circular; see write_page() */
	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
	}
	__clear_page_buffers(page);
	put_page(page);
}
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct inode *aspace = page->mapping->host;
	struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;
	unsigned long t = jiffies + gfs2_tune_get(sdp, gt_stall_secs) * HZ;

	if (!page_has_buffers(page))
		goto out;

	head = bh = page_buffers(page);
	do {
		while (atomic_read(&bh->b_count)) {
			if (!atomic_read(&aspace->i_writecount))
				return 0;

			if (!(gfp_mask & __GFP_WAIT))
				return 0;

			if (time_after_eq(jiffies, t)) {
				stuck_releasepage(bh);
				/* should we withdraw here? */
				return 0;
			}

			yield();
		}

		gfs2_assert_warn(sdp, !buffer_pinned(bh));
		gfs2_assert_warn(sdp, !buffer_dirty(bh));

		gfs2_log_lock(sdp);
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			gfs2_assert_warn(sdp, !bd->bd_ail);
			bd->bd_bh = NULL;
			if (!list_empty(&bd->bd_le.le_list))
				bd = NULL;
			bh->b_private = NULL;
		}
		gfs2_log_unlock(sdp);
		if (bd)
			kmem_cache_free(gfs2_bufdata_cachep, bd);

		bh = bh->b_this_page;
	} while (bh != head);

out:
	return try_to_free_buffers(page);
}
static int ufs_alloc_lastblock(struct inode *inode)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	unsigned lastfrag, i, end;
	struct page *lastpage;
	struct buffer_head *bh;

	lastfrag = (i_size_read(inode) + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		/*
		 * we do not zero out the fragment, because if it is
		 * mapped to a hole it already contains zeroes
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		set_page_dirty(lastpage);
	}

out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}
/*
 * Internal use only
 */
__u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
			      const struct buffer_head *bh)
{
	struct buffer_head *pbh;
	__u64 key;

	key = page_index(bh->b_page) << (PAGE_SHIFT -
					 bmap->b_inode->i_blkbits);
	for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page)
		key++;

	return key;
}
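/*
 * Worked example (hypothetical geometry): with 4 KiB pages and 1 KiB
 * blocks, PAGE_SHIFT - i_blkbits == 2.  A buffer that is third in the
 * ring of the page at index 5 gets key (5 << 2) + 2 = 22, i.e. the file
 * block number counted in block-size units.
 */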
static inline void print_buffers(struct page *page, sector_t block)
{
	struct buffer_head *bh, *head;

	if (!page_has_buffers(page)) {
		printk("Warning: page doesn't have buffers, not sure how that happened, allocating buffer !!!\n");
		create_empty_buffers(page, LFS_BSIZE, 0);
		bh = head = page_buffers(page);
		do {
			map_bh(bh, page->mapping->host->i_sb, block++);
			bh = bh->b_this_page;
		} while (bh != head);
	}

	bh = head = page_buffers(page);
	do {
		if (!buffer_mapped(bh))
			dprintk("The buffer seems to be not mapped ??");
		//dprintk("mapped to blocknr = %Lu\n", bh->b_blocknr);
		bh = bh->b_this_page;
	} while (bh != head);
}
static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct inode *inode = &ip->i_inode;
	struct buffer_head *bh;
	int release = 0;

	if (!page || page->index) {
		page = grab_cache_page(inode->i_mapping, 0);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);
		u64 dsize = i_size_read(inode);

		if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
			dsize = dibh->b_size - sizeof(struct gfs2_dinode);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
		memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits,
				     (1 << BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	if (!gfs2_is_jdata(ip))
		mark_buffer_dirty(bh);
	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);

	if (release) {
		unlock_page(page);
		page_cache_release(page);
	}

	return 0;
}
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			zero_user_segment(page, block_start, block_end);
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	put_io_page(io_page);
	return ret;
}
static void print_buffer(struct page* page)
{
	struct address_space* mapping = page_mapping(page);
	struct buffer_head *bh, *head;

	spin_lock(&mapping->private_lock);
	bh = head = page_buffers(page);
	printk("buffers:");
	do {
		printk(" state:%lx count:%d", bh->b_state,
		       atomic_read(&bh->b_count));

		bh = bh->b_this_page;
	} while (bh != head);
	printk("\n");
	spin_unlock(&mapping->private_lock);
}
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}
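/*
 * Worked example of the overlap test above (hypothetical numbers): with
 * 512-byte buffers and a write spanning from = 700 to to = 1500, the loop
 * visits [0,512), [512,1024), [1024,1536), [1536,2048).  The first buffer
 * is skipped because end <= from (512 <= 700), the last because
 * start >= to (1536 >= 1500); only the two middle buffers, which overlap
 * the written range, are added to the transaction.
 */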
unsigned int nilfs_page_count_clean_buffers(struct page *page,
					    unsigned int from, unsigned int to)
{
	unsigned int block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned int nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}