/*
 * nilfs_gccache_wait_and_mark_dirty - wait for a GC cache buffer read to
 * complete and mark the buffer dirty.
 *
 * Returns zero on success, -EIO if the read failed, or -EEXIST if the
 * buffer was already dirty.
 */
int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
{
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh))
		return -EIO;
	if (buffer_dirty(bh))
		return -EEXIST;

	/* B-tree node buffers and metadata buffers are dirtied differently */
	if (buffer_nilfs_node(bh))
		nilfs_btnode_mark_dirty(bh);
	else
		nilfs_mdt_mark_buffer_dirty(bh);
	return 0;
}
int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
{
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh))
		return -EIO;
	if (buffer_dirty(bh))
		return -EEXIST;

	/*
	 * Validate B-tree node blocks just read from disk: a broken node
	 * is rejected as an I/O error instead of being marked dirty and
	 * written back.
	 */
	if (buffer_nilfs_node(bh) && nilfs_btree_broken_node_block(bh)) {
		clear_buffer_uptodate(bh);
		return -EIO;
	}
	mark_buffer_dirty(bh);
	return 0;
}
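/*
 * Usage sketch (an assumption, not taken from the original source): the
 * garbage collector typically issues reads for the blocks it wants to
 * move and then calls nilfs_gccache_wait_and_mark_dirty() on each buffer.
 * Since -EEXIST only reports that the buffer was already dirty, a caller
 * may treat it as success.  The helper name gc_prepare_buffers and the
 * use of the b_assoc_buffers list are illustrative.
 */
static int gc_prepare_buffers(struct list_head *buffers)
{
	struct buffer_head *bh;
	int ret;

	list_for_each_entry(bh, buffers, b_assoc_buffers) {
		ret = nilfs_gccache_wait_and_mark_dirty(bh);
		if (ret == -EEXIST)
			continue;	/* already dirty: nothing to do */
		if (ret < 0)
			return ret;	/* read error or broken btree node */
	}
	return 0;
}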
struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits);
	struct page *page, *opage;
	struct buffer_head *bh, *obh;

	page = grab_cache_page(mapping, index);
	if (unlikely(!page))
		return NULL;

	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		unlock_page(page);
		page_cache_release(page);
		return NULL;
	}
	if (!buffer_uptodate(bh) && mapping->assoc_mapping != NULL) {
		/*
		 * A shadow page cache uses assoc_mapping to point to its
		 * original page cache.  The following code falls back to
		 * the original cache if the given cache is a shadow and
		 * the lookup did not hit.
		 */
		opage = find_lock_page(mapping->assoc_mapping, index);
		if (!opage)
			return bh;

		obh = __nilfs_get_page_block(opage, blkoff, index, blkbits,
					     b_state);
		if (buffer_uptodate(obh)) {
			nilfs_copy_buffer(bh, obh);
			if (buffer_dirty(obh)) {
				nilfs_mark_buffer_dirty(bh);
				if (!buffer_nilfs_node(bh) && NILFS_MDT(inode))
					nilfs_mdt_mark_dirty(inode);
			}
		}
		brelse(obh);
		unlock_page(opage);
		page_cache_release(opage);
	}
	return bh;
}
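/*
 * Usage sketch (an assumption, not taken from the original source):
 * nilfs_grab_buffer() returns the buffer with its page locked and both
 * the page and the buffer referenced, so a caller must drop all three
 * when done.  The wrapper name touch_one_block is illustrative.
 */
static int touch_one_block(struct inode *inode, unsigned long blkoff)
{
	struct buffer_head *bh;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		return -ENOMEM;

	/* ... read, verify, or initialize the buffer contents here ... */

	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	brelse(bh);
	return 0;
}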
/*
 * nilfs_page_debug - dump the state of a page and its buffers for
 * debugging.  MSIZ is the size of the message buffer, assumed to be
 * defined elsewhere in this file.
 */
void nilfs_page_debug(const char *fname, int line, struct page *page,
		      const char *m, ...)
{
	struct address_space *mapping;
	struct inode *inode;
	va_list args;
	int len;
	char b[MSIZ];

	/* The page should be locked */
	len = snprintf(b, MSIZ, "PAGE %p ", page);
	va_start(args, m);
	len += vsnprintf(b + len, MSIZ - len, m, args);
	va_end(args);

	if (page == NULL) {
		printk(KERN_DEBUG "%s: page=NULL %s at %d\n", b, fname, line);
		return;
	}
	mapping = page->mapping;
	len += snprintf(b + len, MSIZ - len,
			": cnt=%d index#=%llu mapping=%d lru=%d",
			atomic_read(&page->_count),
			(unsigned long long)page->index, !!mapping,
			!list_empty(&page->lru));
	len += snprintf(b + len, MSIZ - len, " %s(%d) flags=", fname, line);
	len += snprint_page_flags(b + len, MSIZ - len, page);
	if (mapping) {
		/*
		 * page_buffers() is only valid when the page actually has
		 * buffers; guard the call to avoid tripping its BUG_ON()
		 * on a buffer-less page.
		 */
		if (page_has_buffers(page) &&
		    buffer_nilfs_node(page_buffers(page)))
			inode = NILFS_BTNC_I(mapping);
		else
			inode = NILFS_AS_I(mapping);
		if (inode != NULL)
			len += snprintf(b + len, MSIZ - len, " ino=%lu",
					inode->i_ino);
	}
	printk(KERN_DEBUG "%s\n", b);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		if (!bh) {
			printk(KERN_DEBUG "PAGE %p: invalid page buffers\n",
			       page);
			return;
		}
		do {
			len = snprintf(b, MSIZ,
				       " BH[%d] %p: cnt=%d blk#=%llu state=",
				       i, bh, atomic_read(&bh->b_count),
				       (unsigned long long)bh->b_blocknr);
			len += snprint_bh_state(b + len, MSIZ - len, bh);
			printk(KERN_DEBUG "%s\n", b);
			bh = bh->b_this_page;
			i++;
			if (unlikely(!bh)) {
				printk(KERN_DEBUG
				       "PAGE %p: unexpected buffers end\n",
				       page);
				break;
			}
		} while (bh != head);
	}
}
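/*
 * Convenience wrapper sketch (an assumption): a helper like this is
 * normally invoked through a macro that records the call site, so that
 * fname/line in the output identify the caller automatically.  The macro
 * name NILFS_PAGE_DEBUG is an illustrative guess.
 */
#define NILFS_PAGE_DEBUG(page, m, a...)					\
	nilfs_page_debug(__func__, __LINE__, (page), (m), ##a)

/* Example call: NILFS_PAGE_DEBUG(page, "before writeback"); */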