static int romfs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	loff_t offset, avail, readlen;
	void *buf;
	int result = -EIO;

	page_cache_get(page);
	lock_kernel();
	buf = kmap(page);
	if (!buf)
		goto err_out;

	/* 32 bit warning -- but not for us :) */
	offset = page_offset(page);
	if (offset < i_size_read(inode)) {
		avail = inode->i_size - offset;
		readlen = min_t(unsigned long, avail, PAGE_SIZE);
		if (romfs_copyfrom(inode, buf,
				   ROMFS_I(inode)->i_dataoffset + offset,
				   readlen) == readlen) {
			if (readlen < PAGE_SIZE)
				memset(buf + readlen, 0, PAGE_SIZE - readlen);
			SetPageUptodate(page);
			result = 0;
		}
	}

	/*
	 * The snippet broke off here; the tail below is a reconstruction of
	 * the usual cleanup path (zero the page on error, flush, unmap, drop
	 * our reference), not a verbatim copy of the original.
	 */
	if (result) {
		memset(buf, 0, PAGE_SIZE);
		SetPageError(page);
	}
	flush_dcache_page(page);
	kunmap(page);
err_out:
	unlock_page(page);
	page_cache_release(page);
	unlock_kernel();

	return result;
}
/*
 * When an ext3-ordered file is truncated, it is possible that many pages are
 * not successfully freed, because they are attached to a committing
 * transaction.  After the transaction commits, these pages are left on the
 * LRU, with no ->mapping, and with attached buffers.  These pages are
 * trivially reclaimable by the VM, but their apparent absence upsets the VM
 * accounting, and it makes the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under journal->j_list_lock.  The caller provided us with a ref
 * against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}
/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	spin_lock_irq(&swapper_space.tree_lock);
	error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
	if (likely(!error)) {
		total_swapcache_pages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&swapper_space.tree_lock);

	if (unlikely(error)) {
		/*
		 * Only a context which has set the SWAP_HAS_CACHE flag
		 * calls add_to_swap_cache(), so add_to_swap_cache() never
		 * returns -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}
/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it to the
 * tail of the inactive list.
 *
 * Returns zero if it cleared PG_writeback.
 */
int rotate_reclaimable_page(struct page *page)
{
	struct pagevec *pvec;
	unsigned long flags;

	if (PageLocked(page))
		return 1;
	if (PageDirty(page))
		return 1;
	if (PageActive(page))
		return 1;
	if (!PageLRU(page))
		return 1;

	page_cache_get(page);
	local_irq_save(flags);
	pvec = &__get_cpu_var(lru_rotate_pvecs);
	if (!pagevec_add(pvec, page))
		pagevec_move_tail(pvec);
	local_irq_restore(flags);

	if (!test_clear_page_writeback(page))
		BUG();

	return 0;
}
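/*
 * A minimal caller sketch, not copied from any one tree: in kernels of this
 * vintage the consumer of rotate_reclaimable_page() is end_page_writeback(),
 * which only clears PG_writeback itself when the rotate did not already do
 * so.  The body below is an assumed illustration, not a verbatim copy.
 */
void end_page_writeback(struct page *page)
{
	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
		/* Either the page was not marked for reclaim, or the rotate
		 * above did not run; clear PG_writeback here instead. */
		if (!test_clear_page_writeback(page))
			BUG();
	}
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}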
/*
 * Write a page to the server.  This will be used for NFS swapping only
 * (for now), and we currently do this synchronously only.
 *
 * We are called with the page locked and we unlock it when done.
 */
static int smb_writepage(struct page *page, struct writeback_control *wbc)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	unsigned long end_index;
	unsigned offset = PAGE_CACHE_SIZE;
	int err;

	BUG_ON(!mapping);
	inode = mapping->host;
	BUG_ON(!inode);

	end_index = inode->i_size >> PAGE_CACHE_SHIFT;

	/* easy case */
	if (page->index < end_index)
		goto do_it;

	/* things got complicated... */
	offset = inode->i_size & (PAGE_CACHE_SIZE - 1);

	/* OK, are we completely out? */
	if (page->index >= end_index + 1 || !offset)
		return 0; /* truncated - don't care */

do_it:
	page_cache_get(page);
	err = smb_writepage_sync(inode, page, 0, offset);
	SetPageUptodate(page);
	unlock_page(page);
	page_cache_release(page);
	return err;
}
/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
			       gfp_t gfp_mask)
{
	int error;

	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		write_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
					  entry.val, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			SetPageSwapCache(page);
			set_page_private(page, entry.val);
			total_swapcache_pages++;
			pagecache_acct(1);
		}
		write_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();
	}
	return error;
}
/**
 * Get the page associated with the block inside our dedup structure.
 */
struct page *dedup_get_block_page(sector_t block)
{
	struct page *res = NULL;

	/* Check if the block is inside our range */
	if (dedup_is_in_range(block)) {
		/* Get the page pointer stored inside the dedup structure */
		res = blocksArray.pages[block - start_block];
		if (res != NULL) {
			/* If it's not NULL, check that the page is up to date and in use */
			if (PageLRU(res) && PageUptodate(res)) {
				page_cache_get(res);
			} else {
				/* Cannot use the page; it must be read from the bdev */
				blocksArray.pages[block - start_block] = NULL;
				res = NULL;
			}
		}
	} else {
		printk("get_block_page: block not in range.\n");
	}

	return res;
}
/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Goto-purists beware: the only reason for goto's here is that it results
 * in better assembly code.. The "default" path will see no jumps at all.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We hold the mm semaphore and the page_table_lock on entry and exit
 * with the page_table_lock released.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long address, pte_t *page_table, pte_t pte)
{
	struct page *old_page, *new_page;

	old_page = pte_page(pte);
	if (!VALID_PAGE(old_page))
		goto bad_wp_page;

	if (!TryLockPage(old_page)) {
		int reuse = can_share_swap_page(old_page);
		unlock_page(old_page);
		if (reuse) {
#ifndef CONFIG_SUPERH
			/* Not needed for VIPT cache */
			flush_cache_page(vma, address);
#endif
			establish_pte(vma, address, page_table,
				      pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
			spin_unlock(&mm->page_table_lock);
			return 1;	/* Minor fault */
		}
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	page_cache_get(old_page);
	spin_unlock(&mm->page_table_lock);

	new_page = alloc_page(GFP_HIGHUSER);
	if (!new_page)
		goto no_mem;
	copy_cow_page(old_page, new_page, address);

	/*
	 * Re-check the pte - we dropped the lock
	 */
	spin_lock(&mm->page_table_lock);
	if (pte_same(*page_table, pte)) {
		if (PageReserved(old_page))
			++mm->rss;
		break_cow(vma, new_page, address, page_table);
		lru_cache_add(new_page);

		/* Free the old page.. */
		new_page = old_page;
	}
	spin_unlock(&mm->page_table_lock);
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 1;	/* Minor fault */

bad_wp_page:
	spin_unlock(&mm->page_table_lock);
	printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",
	       address, (unsigned long)old_page);
	return -1;
no_mem:
	page_cache_release(old_page);
	return -1;
}
/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageSwapBacked(page));

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		page_cache_get(page);
		SetPageSwapCache(page);
		set_page_private(page, entry.val);

		spin_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
					  entry.val, page);
		if (likely(!error)) {
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			INC_CACHE_INFO(add_total);
		}
		spin_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();

		if (unlikely(error)) {
			set_page_private(page, 0UL);
			ClearPageSwapCache(page);
			page_cache_release(page);
		}
	}
	return error;
}
/**
 * lru_cache_add: add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add(pvec);
	put_cpu_var(lru_add_pvecs);
}
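/*
 * A hedged usage sketch (the helper name is hypothetical): the common pattern
 * around lru_cache_add() in this era is to insert a freshly allocated page
 * into the page cache first and only then hand it to the LRU, which takes its
 * own reference via page_cache_get().
 */
static struct page *example_add_new_page(struct address_space *mapping,
					 pgoff_t index)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return NULL;
	if (add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
		page_cache_release(page);
		return NULL;
	}
	lru_cache_add(page);	/* page is now visible to page reclaim */
	return page;		/* returned locked, as add_to_page_cache() left it */
}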
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		   unsigned long start, int len, int write, int force,
		   struct page **pages, struct vm_area_struct **vmas)
{
	int i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_extend_vma(mm, start);

		if (!vma ||
		    (!force &&
		     ((write && !(vma->vm_flags & VM_WRITE)) ||
		      (!write && !(vma->vm_flags & VM_READ))))) {
			if (i)
				return i;
			return -EFAULT;
		}

		spin_lock(&mm->page_table_lock);
		do {
			struct page *map;

			while (!(map = follow_page(mm, start, write))) {
				spin_unlock(&mm->page_table_lock);
				switch (handle_mm_fault(mm, vma, start, write)) {
				case 1:
					tsk->min_flt++;
					break;
				case 2:
					tsk->maj_flt++;
					break;
				case 0:
					if (i)
						return i;
					return -EFAULT;
				default:
					if (i)
						return i;
					return -ENOMEM;
				}
				spin_lock(&mm->page_table_lock);
			}
			if (pages) {
				pages[i] = get_page_map(map);
				/* FIXME: call the correct function,
				 * depending on the type of the found page
				 */
				if (pages[i])
					page_cache_get(pages[i]);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			len--;
		} while (len && start < vma->vm_end);
		spin_unlock(&mm->page_table_lock);
	} while (len);

	return i;
}
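/*
 * A hedged caller sketch (example_pin_user_buffer is hypothetical, not taken
 * from any driver): get_user_pages() of this era is called with mmap_sem held
 * for read, and every page it returns is eventually dropped with
 * page_cache_release() once the I/O that needed it has completed.
 */
static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
				   struct page **pages)
{
	int pinned;

	down_read(&current->mm->mmap_sem);
	pinned = get_user_pages(current, current->mm, uaddr, nr_pages,
				1 /* write */, 0 /* force */, pages, NULL);
	up_read(&current->mm->mmap_sem);

	return pinned;	/* number of pages actually pinned, or -errno */
}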
void fastcall lru_cache_add_active(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		__pagevec_lru_add_active(pvec);
	put_cpu_var(lru_add_active_pvecs);
}
void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		____pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
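/*
 * A small sketch of how __lru_cache_add() is typically consumed: thin
 * per-list wrappers pick the LRU list for the caller.  These mirror the
 * wrappers that usually sit next to this function, but are shown here as an
 * assumed illustration (hence the example_ prefix), not a verbatim copy.
 */
static inline void example_lru_cache_add_anon(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_ANON);
}

static inline void example_lru_cache_add_file(struct page *page)
{
	__lru_cache_add(page, LRU_INACTIVE_FILE);
}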
/*
 * Order of operations is important: flush the pagevec when it's already
 * full, not when adding the last page, to make sure that last page is
 * not added to the LRU directly when passed to this function. Because
 * mark_page_accessed() (called after this when writing) only activates
 * pages that are on the LRU, linear writes in subpage chunks would see
 * every PAGEVEC_SIZE page activated, which is unexpected.
 */
void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_space(pvec))
		__pagevec_lru_add(pvec, lru);
	pagevec_add(pvec, page);
	put_locked_var(swapvec_lock, lru_add_pvecs);
}
/**
 * __lru_cache_add: add the page to the lru_add_pvecs pagevec for the given
 * LRU type.
 */
void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	/*
	 * Add the page to the per-CPU pagevec; only once the pagevec is full
	 * are its pages moved onto the LRU list of the zone each page
	 * belongs to.
	 */
	if (!pagevec_add(pvec, page))
		____pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
/**
 * lru_cache_add: add a page to the page lists
 * @page: the page to add
 */
void fastcall lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);

	page_cache_get(page);
	/* dyc: if no space left in pvec, add all pages into zone's inactive list */
	if (!pagevec_add(pvec, page)) {
		__pagevec_lru_add(pvec);
	}
	put_cpu_var(lru_add_pvecs);
}
void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}
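/*
 * A hedged sketch of the usual caller: mark_page_accessed() promotes a page
 * from referenced to active, and only invokes activate_page() once the page
 * has already been seen referenced.  The body below approximates that era's
 * helper for context; treat it as an assumption rather than an exact copy.
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
	    PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}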
/*
 * We are called with the page locked and we unlock it when done.
 */
static int smb_readpage(struct file *file, struct page *page)
{
	int error;
	struct dentry *dentry = file->f_path.dentry;

	page_cache_get(page);
	error = smb_readpage_sync(dentry, page);
	page_cache_release(page);
	return error;
}
void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec;
	int cpu;

	pvec = &swap_get_cpu_var(lru_add_pvecs, cpu)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		____pagevec_lru_add(pvec, lru);
	swap_put_cpu_var(lru_add_pvecs, cpu);
}
/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it to the
 * tail of the inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}
/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		     struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		page_cache_get(pages[seg]);
	}

	return seg;
}
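/*
 * A minimal usage sketch for get_kernel_pages(); the helper name and the
 * error handling are assumptions.  Each kvec segment must describe exactly
 * one page, and every page handed back has to be released with put_page().
 */
static int example_pin_one_kernel_page(void *buf, struct page **page)
{
	struct kvec kiov = {
		.iov_base = buf,	/* assumed page-aligned */
		.iov_len  = PAGE_SIZE,	/* anything else trips the WARN_ON above */
	};

	if (get_kernel_pages(&kiov, 1, 0, page) != 1)
		return -EFAULT;

	/* ... use *page ... */

	put_page(*page);		/* drop the reference taken above */
	return 0;
}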
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		   unsigned long start, int len, int write, int force,
		   struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	static struct vm_area_struct dummy_vma;

	for (i = 0; i < len; i++) {
		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = &dummy_vma;
		start += PAGE_SIZE;
	}

	return i;
}
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}
void nilfs_btnode_delete(struct buffer_head *bh)
{
	struct address_space *mapping;
	struct page *page = bh->b_page;
	pgoff_t index = page_index(page);
	int still_dirty;

	page_cache_get(page);
	lock_page(page);
	wait_on_page_writeback(page);

	nilfs_forget_buffer(bh);
	still_dirty = PageDirty(page);
	mapping = page->mapping;
	unlock_page(page);
	page_cache_release(page);

	if (!still_dirty && mapping)
		invalidate_inode_pages2_range(mapping, index, index);
}
/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page, and avoids
 * a possible deadlock when we reach the hard limit on the number
 * of dirty pages.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_page *req;

	/* Deal with hard limits. */
	for (;;) {
		/* try to allocate the request struct */
		req = nfs_page_alloc();
		if (req != NULL)
			break;

		/* Try to free up at least one request in order to stay
		 * below the hard limit
		 */
		if (signalled() && (server->flags & NFS_MOUNT_INTR))
			return ERR_PTR(-ERESTARTSYS);
		yield();
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index   = page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset  = offset;
	req->wb_pgbase  = offset;
	req->wb_bytes   = count;
	atomic_set(&req->wb_count, 1);
	req->wb_context = get_nfs_open_context(ctx);

	return req;
}
/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
void free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct *p;
	struct page *page = NULL;

	p = swap_info_get(entry);
	if (p) {
		if (swap_entry_free(p, SWP_OFFSET(entry)) == 1)
			page = find_trylock_page(&swapper_space, entry.val);
		swap_info_put(p);
	}
	if (page) {
		page_cache_get(page);
		/* Only cache user (+us), or swap space full? Free it! */
		if (page_count(page) - !!page->buffers == 2 || vm_swap_full()) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
		UnlockPage(page);
		page_cache_release(page);
	}
}
static int HgfsReadpage(struct file *file,  // IN:     File to read from
			struct page *page)  // IN/OUT: Page to write to
{
	int result = 0;
	HgfsHandle handle;

	ASSERT(file);
	ASSERT(file->f_dentry);
	ASSERT(file->f_dentry->d_inode);
	ASSERT(page);

	handle = FILE_GET_FI_P(file)->handle;
	LOG(6, (KERN_WARNING "VMware hgfs: HgfsReadPage: reading from handle %u\n",
		handle));

	page_cache_get(page);
	result = HgfsDoReadpage(handle, page, 0, PAGE_CACHE_SIZE);
	page_cache_release(page);
	compat_unlock_page(page);

	return result;
}
/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @last: last nfs request created for this page group or NULL if head
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   struct nfs_page *last, unsigned int offset,
		   unsigned int count)
{
	struct nfs_page *req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	nfs_iocounter_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	req->wb_index   = page_file_index(page);
	page_cache_get(page);
	req->wb_offset  = offset;
	req->wb_pgbase  = offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	nfs_page_group_init(req, last);
	return req;
}
/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @inode: inode to which the request is attached
 * @page: page to write
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
		   struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	req->wb_lock_context = nfs_get_lock_context(ctx);
	if (req->wb_lock_context == NULL) {
		nfs_page_free(req);
		return ERR_PTR(-ENOMEM);
	}

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index   = page->index;
	page_cache_get(page);
	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping->host != inode);
	req->wb_offset  = offset;
	req->wb_pgbase  = offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);

	return req;
}
/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			if (mapping_cap_account_dirty(mapping)) {
				__inc_zone_page_state(page, NR_FILE_DIRTY);
				__inc_bdi_stat(mapping->backing_dev_info,
						BDI_RECLAIMABLE);
				task_io_account_write(PAGE_CACHE_SIZE);
			}
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
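/*
 * A hedged caller sketch for write_one_page() (the helper name is made up):
 * the page has to go in locked, and it comes back unlocked on every path,
 * whether or not any I/O was actually issued.
 */
static int example_flush_one_page(struct page *page)
{
	int err;

	lock_page(page);
	err = write_one_page(page, 1);	/* synchronous: wait for writeout */
	/* no unlock_page() here - write_one_page() already unlocked it */
	return err;
}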