/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 *
 * Copies the data block and the inheritable state bits from @sbh to
 * @dbh, then recomputes the destination page's Uptodate/MappedToDisk
 * flags from the combined state of all buffers on that page.
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	/*
	 * Copy the block contents via temporary atomic kernel mappings.
	 * Unmap in reverse order of mapping (KM_USER1 then KM_USER0),
	 * as the atomic-kmap slots are stack-ordered.
	 */
	kaddr0 = kmap_atomic(spage, KM_USER0);
	kaddr1 = kmap_atomic(dpage, KM_USER1);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1, KM_USER1);
	kunmap_atomic(kaddr0, KM_USER0);

	/* Propagate only the inheritable state bits, plus the mapping. */
	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	/*
	 * AND together the Uptodate/Mapped bits of every buffer on the
	 * destination page (seeded from the source buffer's state), so the
	 * page-level flags below reflect all buffers, not just this one.
	 */
	bh = dbh;
	bits = sbh->b_state & ((1UL << BH_Uptodate) | (1UL << BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & (1UL << BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & (1UL << BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}
void nilfs_clear_dirty_pages(struct address_space *mapping) { struct pagevec pvec; unsigned int i; pgoff_t index = 0; pagevec_init(&pvec, 0); while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) { for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; struct buffer_head *bh, *head; lock_page(page); ClearPageUptodate(page); ClearPageMappedToDisk(page); bh = head = page_buffers(page); do { lock_buffer(bh); clear_buffer_dirty(bh); clear_buffer_nilfs_volatile(bh); clear_buffer_uptodate(bh); clear_buffer_mapped(bh); unlock_buffer(bh); bh = bh->b_this_page; } while (bh != head); __nilfs_clear_page_dirty(page); unlock_page(page); } pagevec_release(&pvec); cond_resched(); } }
/*
 * orangefs ->invalidatepage handler: drop the page's cached state when
 * (part of) it is invalidated from the page cache.
 *
 * Note: @length is accepted but unused; the whole page's uptodate and
 * mapped-to-disk flags are cleared regardless of the byte range given.
 */
static void orangefs_invalidatepage(struct page *page,
				    unsigned int offset,
				    unsigned int length)
{
	gossip_debug(GOSSIP_INODE_DEBUG,
		     "orangefs_invalidatepage called on page %p "
		     "(offset is %u)\n", page, offset);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	/* Redundant trailing "return;" removed from this void function. */
}
/*
 * pvfs2 ->invalidatepage handler: drop the page's cached state.
 *
 * NOTE(review): the "#if"/"#else" matching the "#endif" immediately
 * below is outside this chunk; presumably an alternate (int-returning)
 * prototype is selected when
 * HAVE_INT_RETURN_ADDRESS_SPACE_OPERATIONS_INVALIDATEPAGE is defined —
 * confirm against the full file.
 */
static void pvfs2_invalidatepage(struct page *page, unsigned long offset)
#endif
{
	gossip_debug(GOSSIP_INODE_DEBUG,
		     "pvfs2_invalidatepage called on page %p "
		     "(offset is %lu)\n", page, offset);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
#ifdef HAVE_INT_RETURN_ADDRESS_SPACE_OPERATIONS_INVALIDATEPAGE
	/* Kernels with an int-returning ->invalidatepage expect 0 here. */
	return 0;
#else
	return;
#endif
}
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous. It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bale out if page->mapping is no longer equal to the original
 * mapping. This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	/* Page was rehomed while we waited for its lock — nothing to do. */
	if (page->mapping != mapping)
		return;

	/* Give the filesystem a chance to release private metadata first. */
	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	/* Dirtiness must be cancelled before the page leaves the cache. */
	clear_page_dirty(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned. It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bale out if page->mapping is no longer equal to the original
 * mapping. This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 *
 * Returns 0 on success, -EIO if the page no longer belongs to @mapping.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	/* Page was rehomed while we waited for its lock. */
	if (page->mapping != mapping)
		return -EIO;

	/* Give the filesystem a chance to release private metadata first. */
	if (page_has_private(page))
		do_invalidatepage(page, 0);

	/* Dirty accounting must be cancelled before removal. */
	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}
/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 *
 * Clears the buffer's dirty, volatile, uptodate and mapped state under
 * the buffer lock, detaches its disk mapping (b_blocknr = -1), drops the
 * page-level uptodate/mapped flags, and releases the caller's reference.
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct page *page = bh->b_page;

	lock_buffer(bh);
	clear_buffer_nilfs_volatile(bh);
	clear_buffer_dirty(bh);
	/*
	 * With this buffer now clean, drop the page dirty flag too if no
	 * other buffer on the page remains dirty.
	 */
	if (nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);

	clear_buffer_uptodate(bh);
	clear_buffer_mapped(bh);
	bh->b_blocknr = -1;
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	unlock_buffer(bh);
	brelse(bh);
}
/** * nilfs_forget_buffer - discard dirty state * @inode: owner inode of the buffer * @bh: buffer head of the buffer to be discarded */ void nilfs_forget_buffer(struct buffer_head *bh) { struct page *page = bh->b_page; const unsigned long clear_bits = (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) | BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected)); lock_buffer(bh); set_mask_bits(&bh->b_state, clear_bits, 0); if (nilfs_page_buffers_clean(page)) __nilfs_clear_page_dirty(page); bh->b_blocknr = -1; ClearPageUptodate(page); ClearPageMappedToDisk(page); unlock_buffer(bh); brelse(bh); }
/**
 * nilfs_copy_page -- copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages. The dirty flag
 * should be treated by caller. The page must not be under i/o.
 * Both src and dst page must be locked
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = sbufs = page_buffers(src);
	/* Give the destination a matching buffer ring if it has none. */
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	/*
	 * First pass: lock each src/dst buffer pair and copy the masked
	 * state plus the disk mapping. All buffers stay locked until the
	 * second pass below, so the page flags are updated consistently.
	 * NOTE(review): the walk terminates on the dst ring only, so this
	 * assumes src and dst carry the same number of buffers — confirm.
	 */
	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	/* Make dst's page-level flags mirror src's. */
	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

	/* Second pass: release every buffer lock taken above. */
	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned. It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bale out if page->mapping is no longer equal to the original
 * mapping. This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 *
 * Returns 0 on success, -EIO if the page no longer belongs to @mapping.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	/* Page was rehomed while we waited for its lock. */
	if (page->mapping != mapping)
		return -EIO;

	/* Let the filesystem invalidate its private data over the page. */
	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);

	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}
/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 *
 * Drops the page's uptodate/mapped state, scrubs every state bit of the
 * page's buffers (if any), and clears the page dirty flag itself.
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
	struct inode *host = page->mapping->host;
	struct super_block *sb = host->i_sb;

	BUG_ON(!PageLocked(page));

	if (!silent)
		nilfs_warning(sb, __func__,
			      "discard page: offset %lld, ino %lu",
			      page_offset(page), host->i_ino);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);

	if (page_has_buffers(page)) {
		struct buffer_head *b, *first;

		b = first = page_buffers(page);
		do {
			lock_buffer(b);
			if (!silent)
				nilfs_warning(sb, __func__,
					      "discard block %llu, size %zu",
					      (u64)b->b_blocknr, b->b_size);
			/* Strip all VM and nilfs-private state bits. */
			clear_buffer_async_write(b);
			clear_buffer_dirty(b);
			clear_buffer_nilfs_volatile(b);
			clear_buffer_nilfs_checked(b);
			clear_buffer_nilfs_redirected(b);
			clear_buffer_uptodate(b);
			clear_buffer_mapped(b);
			unlock_buffer(b);
		} while ((b = b->b_this_page) != first);
	}

	__nilfs_clear_page_dirty(page);
}
/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 *
 * Drops the page's uptodate/mapped state, atomically strips all state
 * bits from each of its buffers (if any), and clears the page's own
 * dirty flag.
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
	struct inode *host = page->mapping->host;
	struct super_block *sb = host->i_sb;

	BUG_ON(!PageLocked(page));

	if (!silent) {
		nilfs_warning(sb, __func__,
			      "discard page: offset %lld, ino %lu",
			      page_offset(page), host->i_ino);
	}

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);

	if (page_has_buffers(page)) {
		struct buffer_head *b, *first;
		const unsigned long state_mask =
			(1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
			 1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
			 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);

		b = first = page_buffers(page);
		do {
			lock_buffer(b);
			if (!silent) {
				nilfs_warning(sb, __func__,
					      "discard block %llu, size %zu",
					      (u64)b->b_blocknr, b->b_size);
			}
			/* One atomic store clears every tracked bit. */
			set_mask_bits(&b->b_state, state_mask, 0);
			unlock_buffer(b);
		} while ((b = b->b_this_page) != first);
	}

	__nilfs_clear_page_dirty(page);
}
/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 *
 * Drops the page's uptodate/mapped state, atomically clears every
 * tracked state bit of the page's buffers (if any), and finally clears
 * the page's own dirty flag.
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;

	BUG_ON(!PageLocked(page));

	if (!silent)
		nilfs_msg(sb, KERN_WARNING,
			  "discard dirty page: offset=%lld, ino=%lu",
			  page_offset(page), inode->i_ino);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		/* All VM and nilfs-private bits to strip from each buffer. */
		const unsigned long clear_bits =
			(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

		bh = head = page_buffers(page);
		do {
			lock_buffer(bh);
			if (!silent)
				nilfs_msg(sb, KERN_WARNING,
					  "discard dirty block: blocknr=%llu, size=%zu",
					  (u64)bh->b_blocknr, bh->b_size);
			/* Atomically clear all tracked bits in one step. */
			set_mask_bits(&bh->b_state, clear_bits, 0);
			unlock_buffer(bh);
		} while (bh = bh->b_this_page, bh != head);
	}

	__nilfs_clear_page_dirty(page);
}
/*
 * capfs ->invalidatepage handler: drop the page's uptodate and
 * mapped-to-disk state.  @offset is unused — the whole page's state is
 * cleared regardless of the invalidated range.  Always returns 0.
 */
static int capfs_invalidatepage(struct page *page, unsigned long offset)
{
	/* Pointer declarator moved to bind to the variable (kernel style). */
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	return 0;
}