/*
 * Work out if there are any other processes sharing this
 * swap cache page. Free it if you can. Return success.
 */
int remove_exclusive_swap_page(struct page *page)
{
	int retval;
	struct swap_info_struct * p;
	swp_entry_t entry;

	if (!PageLocked(page))
		BUG();
	if (!PageSwapCache(page))
		return 0;
	if (page_count(page) - !!page->buffers != 2)	/* 2: us + cache */
		return 0;

	entry.val = page->index;
	p = swap_info_get(entry);
	if (!p)
		return 0;

	/* Is the only swap cache user the cache itself? */
	retval = 0;
	if (p->swap_map[SWP_OFFSET(entry)] == 1) {
		/* Recheck the page count with the pagecache lock held.. */
		spin_lock(&pagecache_lock);
		if (page_count(page) - !!page->buffers == 2) {
			__delete_from_swap_cache(page);
			SetPageDirty(page);
			retval = 1;
		}
		spin_unlock(&pagecache_lock);
	}
	swap_info_put(p);

	if (retval) {
		block_flushpage(page, 0);
		swap_free(entry);
		page_cache_release(page);
	}

	return retval;
}
void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, node, i;
	struct meminfo * mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_node(node) {
		pg_data_t *n = NODE_DATA(node);
		struct page *map = n->node_mem_map - n->node_start_pfn;

		for_each_nodebank (i,mi,node) {
			unsigned int pfn1, pfn2;
			struct page *page, *end;

			pfn1 = __phys_to_pfn(mi->bank[i].start);
			pfn2 = __phys_to_pfn(mi->bank[i].size + mi->bank[i].start);

			page = map + pfn1;
			end = map + pfn2;

			do {
				total++;
				if (PageReserved(page))
					reserved++;
				else if (PageSwapCache(page))
					cached++;
				else if (PageSlab(page))
					slab++;
				else if (!page_count(page))
					free++;
				else
					shared += page_count(page) - 1;
				page++;
			} while (page < end);
		}
	}

	/* Summary lines restored from the identical tail of the
	 * discontigmem variant of show_mem() below. */
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		unsigned long flags;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}
static void swap_slot_free_notify(struct page *page)
{
	struct swap_info_struct *sis;
	struct gendisk *disk;

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (unlikely(!PageSwapCache(page)))
		return;

	sis = page_swap_info(page);
	if (!(sis->flags & SWP_BLKDEV))
		return;

	/*
	 * The swap subsystem performs lazy swap slot freeing,
	 * expecting that the page will be swapped out again.
	 * So we can avoid an unnecessary write if the page
	 * isn't redirtied.
	 * This is good for real swap storage because we can
	 * reduce unnecessary I/O and enhance wear-leveling
	 * if an SSD is used as the swap device.
	 * But if in-memory swap device (e.g. zram) is used,
	 * this causes a duplicated copy between uncompressed
	 * data in VM-owned memory and compressed data in
	 * zram-owned memory.  So let's free zram-owned memory
	 * and make the VM-owned decompressed page *dirty*,
	 * so the page should be swapped out somewhere again if
	 * we again wish to reclaim it.
	 */
	disk = sis->bdev->bd_disk;
	if (disk->fops->swap_slot_free_notify) {
		swp_entry_t entry;
		unsigned long offset;

		/* Tail restored from the identical inline logic in
		 * end_swap_bio_read() below; a stray merge-conflict
		 * marker was dropped here. */
		entry.val = page_private(page);
		offset = swp_offset(entry);

		SetPageDirty(page);
		disk->fops->swap_slot_free_notify(sis->bdev, offset);
	}
}
/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
					entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		__inc_zone_page_state(page, NR_SWAPCACHE);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache().
		 * So add_to_swap_cache() doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}
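/*
 * For context, a minimal sketch of the non-__ wrapper that pairs with
 * __add_to_swap_cache() above in kernels of this era: preallocate
 * radix-tree nodes outside the spinlock, then take the locked insert
 * path.  radix_tree_maybe_preload() is an assumption based on that
 * era, not something shown in this excerpt.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}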
void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, node;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_node(node) {
		struct page *page, *end;

		page = NODE_MEM_MAP(node);
		end = page + NODE_DATA(node)->node_spanned_pages;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
/*
 * We can use this swap cache entry directly
 * if there are no other references to it.
 *
 * Here "exclusive_swap_page()" does the real
 * work, but we opportunistically check whether
 * we need to get all the locks first..
 */
int can_share_swap_page(struct page *page)
{
	int retval = 0;

	if (!PageLocked(page))
		BUG();
	switch (page_count(page)) {
	case 3:
		if (!page->buffers)
			break;
		/* Fallthrough */
	case 2:
		if (!PageSwapCache(page))
			break;
		retval = exclusive_swap_page(page);
		break;
	case 1:
		if (PageReserved(page))
			break;
		retval = 1;
	}
	return retval;
}
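/*
 * The helper this defers to is not shown in this excerpt.  A sketch
 * reconstructed from the same locking pattern in
 * remove_exclusive_swap_page() above (same 2.4-era VM), so treat it
 * as illustrative rather than the exact source: it checks whether the
 * swap cache is the page's only other user, without freeing anything.
 */
static int exclusive_swap_page(struct page *page)
{
	int retval = 0;
	struct swap_info_struct * p;
	swp_entry_t entry;

	entry.val = page->index;
	p = swap_info_get(entry);
	if (p) {
		/* Is the only swap cache user the cache itself? */
		if (p->swap_map[SWP_OFFSET(entry)] == 1) {
			/* Recheck the page count with the pagecache lock held.. */
			spin_lock(&pagecache_lock);
			if (page_count(page) - !!page->buffers == 2)
				retval = 1;
			spin_unlock(&pagecache_lock);
		}
		swap_info_put(p);
	}
	return retval;
}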
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	struct address_space *address_space;
	int i, nr = hpage_nr_pages(page);
	swp_entry_t entry;
	pgoff_t idx;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	idx = swp_offset(entry);
	for (i = 0; i < nr; i++) {
		radix_tree_delete(&address_space->page_tree, idx + i);
		set_page_private(page + i, 0);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}
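/*
 * Sketch of the locked wrapper around __delete_from_swap_cache() as it
 * looked in the THP-swap era this snippet is from: take the tree lock,
 * unhook the page, then release the swap slot(s) and the swap cache's
 * page references.  put_swap_page() is an assumption based on that
 * era, not something shown in this excerpt.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	put_swap_page(page, entry);
	page_ref_sub(page, hpage_nr_pages(page));
}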
static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		bool active = PageActive(page);

		del_page_from_lru_list(page, lruvec,
				       LRU_INACTIVE_ANON + active);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * lazyfree pages are clean anonymous pages.  They have
		 * the SwapBacked flag cleared, to distinguish them from
		 * normal anonymous pages.
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
		count_memcg_page_event(page, PGLAZYFREE);
		update_page_reclaim_stat(lruvec, 1, 0);
	}
}
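/*
 * Sketch of how lru_lazyfree_fn() is driven in kernels of this
 * vintage: candidate pages are batched in a per-CPU pagevec and
 * flushed through pagevec_lru_move_fn().  lru_lazyfree_pvecs is the
 * assumed per-CPU pagevec for this sketch; it is not defined in this
 * excerpt.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);

		get_page(page);
		/* flush the batch when it fills up, or immediately for
		 * compound pages, which pagevecs handle one at a time */
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
		put_cpu_var(lru_lazyfree_pvecs);
	}
}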
/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Goto-purists beware: the only reason for goto's here is that it results
 * in better assembly code.. The "default" path will see no jumps at all.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with the page table read-lock held, and need to exit without
 * it.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
	unsigned long address, pte_t *page_table, pte_t pte)
{
	struct page *old_page, *new_page;

	old_page = pte_page(pte);
	if (!VALID_PAGE(old_page))
		goto bad_wp_page;

	/*
	 * We can avoid the copy if:
	 * - we're the only user (count == 1)
	 * - the only other user is the swap cache,
	 *   and the only swap cache user is itself,
	 *   in which case we can just continue to
	 *   use the same swap cache (it will be
	 *   marked dirty).
	 */
	switch (page_count(old_page)) {
	case 2:
		/*
		 * Lock the page so that no one can look it up from
		 * the swap cache, grab a reference and start using it.
		 * Can not do lock_page, holding page_table_lock.
		 */
		if (!PageSwapCache(old_page) || TryLockPage(old_page))
			break;
		if (is_page_shared(old_page)) {
			UnlockPage(old_page);
			break;
		}
		UnlockPage(old_page);
		/* FallThrough */
	case 1:
		flush_cache_page(vma, address);
		establish_pte(vma, address, page_table,
			pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
		spin_unlock(&mm->page_table_lock);
		return 1;	/* Minor fault */
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	spin_unlock(&mm->page_table_lock);
	new_page = page_cache_alloc();
	if (!new_page)
		return -1;
	spin_lock(&mm->page_table_lock);

	/*
	 * Re-check the pte - we dropped the lock
	 */
	if (pte_same(*page_table, pte)) {
		if (PageReserved(old_page))
			++mm->rss;
		break_cow(vma, old_page, new_page, address, page_table);

		/* Free the old page.. */
		new_page = old_page;
	}
	spin_unlock(&mm->page_table_lock);
	page_cache_release(new_page);
	return 1;	/* Minor fault */

bad_wp_page:
	spin_unlock(&mm->page_table_lock);
	printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",
		address, (unsigned long)old_page);
	return -1;
}
/*
 * We completely avoid races by reading each swap page in advance,
 * and then search for the process using it.  All the necessary
 * page table adjustments can then be made atomically.
 */
static int try_to_unuse(unsigned int type)
{
	struct swap_info_struct * si = &swap_info[type];
	struct mm_struct *start_mm;
	unsigned short *swap_map;
	unsigned short swcount;
	struct page *page;
	swp_entry_t entry;
	int i = 0;
	int retval = 0;
	int reset_overflow = 0;

	/*
	 * When searching mms for an entry, a good strategy is to
	 * start at the first mm we freed the previous entry from
	 * (though actually we don't notice whether we or coincidence
	 * freed the entry).  Initialize this start_mm with a hold.
	 *
	 * A simpler strategy would be to start at the last mm we
	 * freed the previous entry from; but that would take less
	 * advantage of mmlist ordering (now preserved by swap_out()),
	 * which clusters forked address spaces together, most recent
	 * child immediately after parent.  If we race with dup_mmap(),
	 * we very much want to resolve parent before child, otherwise
	 * we may miss some entries: using last mm would invert that.
	 */
	start_mm = &init_mm;
	atomic_inc(&init_mm.mm_users);

	/*
	 * Keep on scanning until all entries have gone.  Usually,
	 * one pass through swap_map is enough, but not necessarily:
	 * mmput() removes mm from mmlist before exit_mmap() and its
	 * zap_page_range().  That's not too bad, those entries are
	 * on their way out, and handled faster there than here.
	 * do_munmap() behaves similarly, taking the range out of mm's
	 * vma list before zap_page_range().  But unfortunately, when
	 * unmapping a part of a vma, it takes the whole out first,
	 * then reinserts what's left after (might even reschedule if
	 * open() method called) - so swap entries may be invisible
	 * to swapoff for a while, then reappear - but that is rare.
	 */
	while ((i = find_next_to_unuse(si, i))) {
		/*
		 * Get a page for the entry, using the existing swap
		 * cache page if there is one.  Otherwise, get a clean
		 * page and read the swap into it.
		 */
		swap_map = &si->swap_map[i];
		entry = SWP_ENTRY(type, i);
		page = read_swap_cache_async(entry);
		if (!page) {
			/*
			 * Either swap_duplicate() failed because entry
			 * has been freed independently, and will not be
			 * reused since sys_swapoff() already disabled
			 * allocation from here, or alloc_page() failed.
			 */
			if (!*swap_map)
				continue;
			retval = -ENOMEM;
			break;
		}

		/*
		 * Don't hold on to start_mm if it looks like exiting.
		 */
		if (atomic_read(&start_mm->mm_users) == 1) {
			mmput(start_mm);
			start_mm = &init_mm;
			atomic_inc(&init_mm.mm_users);
		}

		/*
		 * Wait for and lock page.  When do_swap_page races with
		 * try_to_unuse, do_swap_page can handle the fault much
		 * faster than try_to_unuse can locate the entry.  This
		 * apparently redundant "wait_on_page" lets try_to_unuse
		 * defer to do_swap_page in such a case - in some tests,
		 * do_swap_page and try_to_unuse repeatedly compete.
		 */
		wait_on_page(page);
		lock_page(page);

		/*
		 * Remove all references to entry, without blocking.
		 * Whenever we reach init_mm, there's no address space
		 * to search, but use it as a reminder to search shmem.
		 */
		swcount = *swap_map;
		if (swcount > 1) {
			flush_page_to_ram(page);
			if (start_mm == &init_mm)
				shmem_unuse(entry, page);
			else
				unuse_process(start_mm, entry, page);
		}
		if (*swap_map > 1) {
			int set_start_mm = (*swap_map >= swcount);
			struct list_head *p = &start_mm->mmlist;
			struct mm_struct *new_start_mm = start_mm;
			struct mm_struct *mm;

			spin_lock(&mmlist_lock);
			while (*swap_map > 1 &&
					(p = p->next) != &start_mm->mmlist) {
				mm = list_entry(p, struct mm_struct, mmlist);
				swcount = *swap_map;
				if (mm == &init_mm) {
					set_start_mm = 1;
					shmem_unuse(entry, page);
				} else
					unuse_process(mm, entry, page);
				if (set_start_mm && *swap_map < swcount) {
					new_start_mm = mm;
					set_start_mm = 0;
				}
			}
			atomic_inc(&new_start_mm->mm_users);
			spin_unlock(&mmlist_lock);
			mmput(start_mm);
			start_mm = new_start_mm;
		}

		/*
		 * How could swap count reach 0x7fff when the maximum
		 * pid is 0x7fff, and there's no way to repeat a swap
		 * page within an mm (except in shmem, where it's the
		 * shared object which takes the reference count)?
		 * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
		 *
		 * If that's wrong, then we should worry more about
		 * exit_mmap() and do_munmap() cases described above:
		 * we might be resetting SWAP_MAP_MAX too early here.
		 * We know "Undead"s can happen, they're okay, so don't
		 * report them; but do report if we reset SWAP_MAP_MAX.
		 */
		if (*swap_map == SWAP_MAP_MAX) {
			swap_list_lock();
			swap_device_lock(si);
			nr_swap_pages++;
			*swap_map = 1;
			swap_device_unlock(si);
			swap_list_unlock();
			reset_overflow = 1;
		}

		/*
		 * If a reference remains (rare), we would like to leave
		 * the page in the swap cache; but try_to_swap_out could
		 * then re-duplicate the entry once we drop page lock,
		 * so we might loop indefinitely; also, that page could
		 * not be swapped out to other storage meanwhile.  So:
		 * delete from cache even if there's another reference,
		 * after ensuring that the data has been saved to disk -
		 * since if the reference remains (rarer), it will be
		 * read from disk into another page.  Splitting into two
		 * pages would be incorrect if swap supported "shared
		 * private" pages, but they are handled by tmpfs files.
		 * Note shmem_unuse already deleted its page from the
		 * swap cache.
		 */
		if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
			rw_swap_page(WRITE, page);
			lock_page(page);
		}
		if (PageSwapCache(page))
			delete_from_swap_cache(page);

		/*
		 * So we could skip searching mms once swap count went
		 * to 1, we did not mark any present ptes as dirty: must
		 * mark page dirty so try_to_swap_out will preserve it.
		 */
		SetPageDirty(page);
		UnlockPage(page);
		page_cache_release(page);

		/*
		 * Make sure that we aren't completely killing
		 * interactive performance.  Interruptible check on
		 * signal_pending() would be nice, but changes the spec?
		 */
		if (current->need_resched)
			schedule();
	}
/*
 * We completely avoid races by reading each swap page in advance,
 * and then search for the process using it.  All the necessary
 * page table adjustments can then be made atomically.
 */
static int try_to_unuse(unsigned int type)
{
	struct swap_info_struct * si = &swap_info[type];
	struct mm_struct *start_mm;
	unsigned short *swap_map;
	unsigned short swcount;
	struct page *page;
	swp_entry_t entry;
	unsigned int i = 0;
	int retval = 0;
	int reset_overflow = 0;
	int shmem;

	/*
	 * When searching mms for an entry, a good strategy is to
	 * start at the first mm we freed the previous entry from
	 * (though actually we don't notice whether we or coincidence
	 * freed the entry).  Initialize this start_mm with a hold.
	 *
	 * A simpler strategy would be to start at the last mm we
	 * freed the previous entry from; but that would take less
	 * advantage of mmlist ordering, which clusters forked mms
	 * together, child after parent.  If we race with dup_mmap(), we
	 * prefer to resolve parent before child, lest we miss entries
	 * duplicated after we scanned child: using last mm would invert
	 * that.  Though it's only a serious concern when an overflowed
	 * swap count is reset from SWAP_MAP_MAX, preventing a rescan.
	 */
	start_mm = &init_mm;
	atomic_inc(&init_mm.mm_users);

	/*
	 * Keep on scanning until all entries have gone.  Usually,
	 * one pass through swap_map is enough, but not necessarily:
	 * there are races when an instance of an entry might be missed.
	 */
	while ((i = find_next_to_unuse(si, i)) != 0) {
		if (signal_pending(current)) {
			retval = -EINTR;
			break;
		}

		/*
		 * Get a page for the entry, using the existing swap
		 * cache page if there is one.  Otherwise, get a clean
		 * page and read the swap into it.
		 */
		swap_map = &si->swap_map[i];
		entry = swp_entry(type, i);
		page = read_swap_cache_async(entry, NULL, 0);
		if (!page) {
			/*
			 * Either swap_duplicate() failed because entry
			 * has been freed independently, and will not be
			 * reused since sys_swapoff() already disabled
			 * allocation from here, or alloc_page() failed.
			 */
			if (!*swap_map)
				continue;
			retval = -ENOMEM;
			break;
		}

		/*
		 * Don't hold on to start_mm if it looks like exiting.
		 */
		if (atomic_read(&start_mm->mm_users) == 1) {
			mmput(start_mm);
			start_mm = &init_mm;
			atomic_inc(&init_mm.mm_users);
		}

		/*
		 * Wait for and lock page.  When do_swap_page races with
		 * try_to_unuse, do_swap_page can handle the fault much
		 * faster than try_to_unuse can locate the entry.  This
		 * apparently redundant "wait_on_page_locked" lets try_to_unuse
		 * defer to do_swap_page in such a case - in some tests,
		 * do_swap_page and try_to_unuse repeatedly compete.
		 */
		wait_on_page_locked(page);
		wait_on_page_writeback(page);
		lock_page(page);
		wait_on_page_writeback(page);

		/*
		 * Remove all references to entry.
		 * Whenever we reach init_mm, there's no address space
		 * to search, but use it as a reminder to search shmem.
		 */
		shmem = 0;
		swcount = *swap_map;
		if (swcount > 1) {
			if (start_mm == &init_mm)
				shmem = shmem_unuse(entry, page);
			else
				retval = unuse_mm(start_mm, entry, page);
		}
		if (*swap_map > 1) {
			int set_start_mm = (*swap_map >= swcount);
			struct list_head *p = &start_mm->mmlist;
			struct mm_struct *new_start_mm = start_mm;
			struct mm_struct *prev_mm = start_mm;
			struct mm_struct *mm;

			atomic_inc(&new_start_mm->mm_users);
			atomic_inc(&prev_mm->mm_users);
			spin_lock(&mmlist_lock);
			while (*swap_map > 1 && !retval &&
					(p = p->next) != &start_mm->mmlist) {
				mm = list_entry(p, struct mm_struct, mmlist);
				if (!atomic_inc_not_zero(&mm->mm_users))
					continue;
				spin_unlock(&mmlist_lock);
				mmput(prev_mm);
				prev_mm = mm;

				cond_resched();

				swcount = *swap_map;
				if (swcount <= 1)
					;
				else if (mm == &init_mm) {
					set_start_mm = 1;
					shmem = shmem_unuse(entry, page);
				} else
					retval = unuse_mm(mm, entry, page);
				if (set_start_mm && *swap_map < swcount) {
					mmput(new_start_mm);
					atomic_inc(&mm->mm_users);
					new_start_mm = mm;
					set_start_mm = 0;
				}
				spin_lock(&mmlist_lock);
			}
			spin_unlock(&mmlist_lock);
			mmput(prev_mm);
			mmput(start_mm);
			start_mm = new_start_mm;
		}
		if (retval) {
			unlock_page(page);
			page_cache_release(page);
			break;
		}

		/*
		 * How could swap count reach 0x7fff when the maximum
		 * pid is 0x7fff, and there's no way to repeat a swap
		 * page within an mm (except in shmem, where it's the
		 * shared object which takes the reference count)?
		 * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
		 *
		 * If that's wrong, then we should worry more about
		 * exit_mmap() and do_munmap() cases described above:
		 * we might be resetting SWAP_MAP_MAX too early here.
		 * We know "Undead"s can happen, they're okay, so don't
		 * report them; but do report if we reset SWAP_MAP_MAX.
		 */
		if (*swap_map == SWAP_MAP_MAX) {
			spin_lock(&swap_lock);
			*swap_map = 1;
			spin_unlock(&swap_lock);
			reset_overflow = 1;
		}

		/*
		 * If a reference remains (rare), we would like to leave
		 * the page in the swap cache; but try_to_unmap could
		 * then re-duplicate the entry once we drop page lock,
		 * so we might loop indefinitely; also, that page could
		 * not be swapped out to other storage meanwhile.  So:
		 * delete from cache even if there's another reference,
		 * after ensuring that the data has been saved to disk -
		 * since if the reference remains (rarer), it will be
		 * read from disk into another page.  Splitting into two
		 * pages would be incorrect if swap supported "shared
		 * private" pages, but they are handled by tmpfs files.
		 *
		 * Note shmem_unuse already deleted a swappage from
		 * the swap cache, unless the move to filepage failed:
		 * in which case it left swappage in cache, lowered its
		 * swap count to pass quickly through the loops above,
		 * and now we must reincrement count to try again later.
		 */
		if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
			struct writeback_control wbc = {
				.sync_mode = WB_SYNC_NONE,
			};

			swap_writepage(page, &wbc);
			lock_page(page);
			wait_on_page_writeback(page);
		}
		if (PageSwapCache(page)) {
			if (shmem)
				swap_duplicate(entry);
			else
				delete_from_swap_cache(page);
		}

		/*
		 * So we could skip searching mms once swap count went
		 * to 1, we did not mark any present ptes as dirty: must
		 * mark page dirty so shrink_page_list will preserve it.
		 */
		SetPageDirty(page);
		unlock_page(page);
		page_cache_release(page);

		/*
		 * Make sure that we aren't completely killing
		 * interactive performance.
		 */
		cond_resched();
	}
int mm_putFreePages(unsigned long addr, unsigned long order)
{
	unsigned long map_nr = MAP_NR(addr);
	int ret = 0;
	int page_order = order;
	unsigned long flags;

	stat_frees++;
#ifdef MEMLEAK_TOOL
	memleakHook_free(addr, 0);
#endif
	spin_lock_irqsave(&free_area_lock, flags);
	if (map_nr < g_max_mapnr) {
		page_struct_t * map = g_mem_map + map_nr;

		if (PageReserved(map)) {
			BUG();
		}
		if (PageNetBuf(map)) {
			BUG();
		}
#ifdef MEMORY_DEBUG
		if (map->option_data != 0) {
			BUG();
		}
#endif
		if (atomic_dec_and_test(&map->count)) {
			if (PageSwapCache(map)) {
				ut_log("PANIC Freeing swap cache pages");
				BUG();
			}
			// map->flags &= ~(1 << PG_referenced);
			_free_pages_ok(map_nr, order);
			if (init_done == 1) {
				DEBUG(" Freeing memory addr:%x order:%d \n",
				      addr, order);
			} else {
				// BUG();
			}
			ret = 1;
		}
	} else {
		BUG();
	}

last:
	if (ret) {
		unsigned long i = (1 << page_order);
		struct page *page = virt_to_page(addr);

		while (i--) {
#ifdef MEMORY_DEBUG
			if (!PageReferenced(page)) {
				ut_printf("Page Backtrace in Free Page :\n");
				ut_printBackTrace(page->bt_addr_list,
						  MAX_BACKTRACE_LENGTH);
			}
#endif
			assert(PageReferenced(page));
			PageClearReferenced(page);
#ifdef MEMORY_DEBUG
			ut_storeBackTrace(page->bt_addr_list,
					  MAX_BACKTRACE_LENGTH);
#endif
			page++;
		}
	} else {
		BUG();
	}
	spin_unlock_irqrestore(&free_area_lock, flags);
	return ret;
}
static void __free_pages_ok (struct page *page, unsigned int order)
{
	unsigned long index, page_idx, mask, flags;
	free_area_t *area;
	struct page *base;
	zone_t *zone;

	if (PageLRU(page))
		lru_cache_del(page);

	if (page->buffers)
		BUG();
	if (page->mapping)
		BUG();
	if (!VALID_PAGE(page))
		BUG();
	if (PageSwapCache(page))
		BUG();
	if (PageLocked(page))
		BUG();
	if (PageLRU(page))
		BUG();
	if (PageActive(page))
		BUG();

	TRACE_MEMORY(TRACE_EV_MEMORY_PAGE_FREE, order);

	page->flags &= ~((1<<PG_referenced) | (1<<PG_dirty));

	if (current->flags & PF_FREE_PAGES)
		goto local_freelist;
back_local_freelist:

	zone = page->zone;

	mask = (~0UL) << order;
	base = zone->zone_mem_map;
	page_idx = page - base;
	if (page_idx & ~mask)
		BUG();
	index = page_idx >> (1 + order);

	area = zone->free_area + order;

	spin_lock_irqsave(&zone->lock, flags);

	zone->free_pages -= mask;

	while (mask + (1 << (MAX_ORDER-1))) {
		struct page *buddy1, *buddy2;

		if (area >= zone->free_area + MAX_ORDER)
			BUG();
		if (!__test_and_change_bit(index, area->map))
			/*
			 * the buddy page is still allocated.
			 */
			break;
		/*
		 * Move the buddy up one level.
		 */
		buddy1 = base + (page_idx ^ -mask);
		buddy2 = base + page_idx;
		if (BAD_RANGE(zone,buddy1))
			BUG();
		if (BAD_RANGE(zone,buddy2))
			BUG();

		memlist_del(&buddy1->list);
		mask <<= 1;
		area++;
		index >>= 1;
		page_idx &= mask;
	}
	memlist_add_head(&(base + page_idx)->list, &area->free_list);

	spin_unlock_irqrestore(&zone->lock, flags);
	return;

local_freelist:
	if (current->nr_local_pages)
		goto back_local_freelist;
	if (in_interrupt())
		goto back_local_freelist;

	list_add(&page->list, &current->local_pages);
	page->index = order;
	current->nr_local_pages++;
}
static int unswap_by_move(unsigned short *map, unsigned long max,
			  unsigned long start, unsigned long n_pages)
{
	struct task_struct *p;
	unsigned long entry, rover = (start == 1) ? n_pages+1 : 1;
	unsigned long i, j;

	DPRINTK( "unswapping %lu..%lu by moving in swap\n",
		 start, start+n_pages-1 );

	/* can free the allocated pages by moving them to other swap pages */
	for( i = start; i < start+n_pages; ++i ) {
		if (!map[i]) {
			map[i] = SWAP_MAP_BAD;
			DPRINTK( "unswap: page %lu was free\n", i );
			continue;
		}
		else if (map[i] == SWAP_MAP_BAD) {
			printk( KERN_ERR "get_stram_region: page %lu already "
				"reserved??\n", i );
		}
		DPRINTK( "unswap: page %lu is alloced, count=%u\n", i, map[i] );

		/* find a free page not in our region */
		for( j = rover; j != rover-1; j = (j == max-1) ? 1 : j+1 ) {
			if (j >= start && j < start+n_pages)
				continue;
			if (!map[j]) {
				rover = j+1;
				break;
			}
		}
		if (j == rover-1) {
			printk( KERN_ERR "get_stram_region: not enough free swap "
				"pages now??\n" );
			return( -ENOMEM );
		}
		DPRINTK( "unswap: map[i=%lu]=%u map[j=%lu]=%u nr_swap=%u\n",
			 i, map[i], j, map[j], nr_swap_pages );

		--nr_swap_pages;
		entry = SWP_ENTRY( stram_swap_type, j );
		if (stram_swap_info->lowest_bit == j)
			stram_swap_info->lowest_bit++;
		if (stram_swap_info->highest_bit == j)
			stram_swap_info->highest_bit--;

		memcpy( SWAP_ADDR(j), SWAP_ADDR(i), PAGE_SIZE );
#ifdef DO_PROC
		stat_swap_move++;
#endif

		while( map[i] ) {
			read_lock(&tasklist_lock);
			for_each_task(p) {
				if (unswap_process( p->mm, SWP_ENTRY( stram_swap_type, i ),
						    entry, 1 )) {
					read_unlock(&tasklist_lock);
					map[j]++;
					goto repeat;
				}
			}
			read_unlock(&tasklist_lock);
			if (map[i] && map[i] != SWAP_MAP_MAX) {
				printk( KERN_ERR "get_stram_region: ST-RAM swap page %lu "
					"not used by any process\n", i );
				/* quit while loop and overwrite bad map entry */
				break;
			}
			else if (!map[i]) {
				/* somebody else must have swapped in that page, so free the
				 * new one (we're moving to) */
				DPRINTK( "unswap: map[i] became 0, also clearing map[j]\n" );
				map[j] = 0;
			}
		  repeat:
			;	/* null statement: a label must precede a statement */
		}

		DPRINTK( "unswap: map[i=%lu]=%u map[j=%lu]=%u nr_swap=%u\n",
			 i, map[i], j, map[j], nr_swap_pages );
		map[i] = SWAP_MAP_BAD;
		if (stram_swap_info->lowest_bit == i)
			stram_swap_info->lowest_bit++;
		if (stram_swap_info->highest_bit == i)
			stram_swap_info->highest_bit--;
		--nr_swap_pages;
	}

	return( 0 );
}
#endif

static int unswap_by_read(unsigned short *map, unsigned long max,
			  unsigned long start, unsigned long n_pages)
{
	struct task_struct *p;
	unsigned long entry, page;
	unsigned long i;
	struct page *page_map;

	DPRINTK( "unswapping %lu..%lu by reading in\n",
		 start, start+n_pages-1 );

	for( i = start; i < start+n_pages; ++i ) {
		if (map[i] == SWAP_MAP_BAD) {
			printk( KERN_ERR "get_stram_region: page %lu already "
				"reserved??\n", i );
			continue;
		}

		if (map[i]) {
			entry = SWP_ENTRY(stram_swap_type, i);
			DPRINTK("unswap: map[i=%lu]=%u nr_swap=%u\n",
				i, map[i], nr_swap_pages);

			/* Get a page for the entry, using the existing
			   swap cache page if there is one.  Otherwise,
			   get a clean page and read the swap into it. */
			page_map = read_swap_cache(entry);
			if (page_map) {
				page = page_address(page_map);
				read_lock(&tasklist_lock);
				for_each_task(p)
					unswap_process(p->mm, entry, page /* , 0 */);
				read_unlock(&tasklist_lock);
				shm_unuse(entry, page);
				/* Now get rid of the extra reference to the
				   temporary page we've been using.
				*/
				if (PageSwapCache(page_map))
					delete_from_swap_cache(page_map);
				__free_page(page_map);
#ifdef DO_PROC
				stat_swap_force++;
#endif
			}
			else if (map[i])
				return -ENOMEM;
		}

		DPRINTK( "unswap: map[i=%lu]=%u nr_swap=%u\n",
			 i, map[i], nr_swap_pages );
		map[i] = SWAP_MAP_BAD;
		if (stram_swap_info->lowest_bit == i)
			stram_swap_info->lowest_bit++;
		if (stram_swap_info->highest_bit == i)
			stram_swap_info->highest_bit--;
		--nr_swap_pages;
	}

	return 0;
}

/*
 * reserve a region in ST-RAM swap space for an allocation
 */
static void *get_stram_region( unsigned long n_pages )
{
	unsigned short *map = stram_swap_info->swap_map;
	unsigned long max = stram_swap_info->max;
	unsigned long start, total_free, region_free;
	int err;
	void *ret = NULL;

	DPRINTK( "get_stram_region(n_pages=%lu)\n", n_pages );

	down(&stram_swap_sem);

	/* disallow writing to the swap device now */
	stram_swap_info->flags = SWP_USED;

	/* find a region of n_pages pages in the swap space including as much free
	 * pages as possible (and excluding any already-reserved pages). */
	if (!(start = find_free_region( n_pages, &total_free, &region_free )))
		goto end;
	DPRINTK( "get_stram_region: region starts at %lu, has %lu free pages\n",
		 start, region_free );

#if 0
	err = ((total_free-region_free >= n_pages-region_free) ?
	       unswap_by_move( map, max, start, n_pages ) :
	       unswap_by_read( map, max, start, n_pages ));
#else
	err = unswap_by_read(map, max, start, n_pages);
#endif
	if (err)
		goto end;

	ret = SWAP_ADDR(start);
  end:
	/* allow using swap device again */
	stram_swap_info->flags = SWP_WRITEOK;
	up(&stram_swap_sem);
	DPRINTK( "get_stram_region: returning %p\n", ret );
	return( ret );
}
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct stack_trace trace = {
		.nr_entries = 0,
		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
		.entries = &page_ext->trace_entries[0],
		.skip = 3,
	};

	save_stack_trace(&trace);

	page_ext->order = order;
	page_ext->gfp_mask = gfp_mask;
	page_ext->nr_entries = trace.nr_entries;

	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_ext *page_ext)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;
	struct stack_trace trace = {
		.nr_entries = page_ext->nr_entries,
		.entries = &page_ext->trace_entries[0],
	};

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask 0x%x\n",
			page_ext->order, page_ext->gfp_mask);
	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pfnblock_migratetype(page, pfn);
	page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
			pfn,
			pfn >> pageblock_order,
			pageblock_mt,
			pageblock_mt != page_mt ? "Fallback" : "        ",
			PageLocked(page)	? "K" : " ",
			PageError(page)		? "E" : " ",
			PageReferenced(page)	? "R" : " ",
			PageUptodate(page)	? "U" : " ",
			PageDirty(page)		? "D" : " ",
			PageLRU(page)		? "L" : " ",
			PageActive(page)	? "A" : " ",
			PageSlab(page)		? "S" : " ",
			PageWriteback(page)	? "W" : " ",
			PageCompound(page)	? "C" : " ",
			PageSwapCache(page)	? "B" : " ",
			PageMappedToDisk(page)	? "M" : " ");
	if (ret >= count)
		goto err;

	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
	if (ret >= count)
		goto err;

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;

	if (!page_owner_inited)
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page, page_ext);
	}

	return 0;
}
void end_swap_bio_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
		goto out;
	}

	SetPageUptodate(page);

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (likely(PageSwapCache(page))) {
		struct swap_info_struct *sis;

		sis = page_swap_info(page);
		if (sis->flags & SWP_BLKDEV) {
			/*
			 * The swap subsystem performs lazy swap slot freeing,
			 * expecting that the page will be swapped out again.
			 * So we can avoid an unnecessary write if the page
			 * isn't redirtied.
			 * This is good for real swap storage because we can
			 * reduce unnecessary I/O and enhance wear-leveling
			 * if an SSD is used as the swap device.
			 * But if in-memory swap device (e.g. zram) is used,
			 * this causes a duplicated copy between uncompressed
			 * data in VM-owned memory and compressed data in
			 * zram-owned memory.  So let's free zram-owned memory
			 * and make the VM-owned decompressed page *dirty*,
			 * so the page should be swapped out somewhere again if
			 * we again wish to reclaim it.
			 */
			struct gendisk *disk = sis->bdev->bd_disk;

			if (disk->fops->swap_slot_free_notify) {
				swp_entry_t entry;
				unsigned long offset;

				entry.val = page_private(page);
				offset = swp_offset(entry);

				SetPageDirty(page);
				disk->fops->swap_slot_free_notify(sis->bdev,
						offset);
			}
		}
	}

out:
	unlock_page(page);
	bio_put(bio);
}
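/*
 * Sketch of the driver side of the swap_slot_free_notify hook, modeled
 * loosely on how an in-memory swap device like zram uses it: the block
 * driver frees its backing copy as soon as the VM signals that the swap
 * slot is dead.  The my_dev_*() names are hypothetical; only the
 * block_device_operations member itself is real.
 */
static void my_swap_slot_free_notify(struct block_device *bdev,
				     unsigned long offset)
{
	struct my_dev *dev = bdev->bd_disk->private_data;

	/*
	 * Drop the compressed copy for this slot.  A later read would
	 * see stale data, but the VM re-dirtied the page above, so it
	 * will be written out again before any such read can happen.
	 */
	my_dev_free_slot(dev, offset);
}

static const struct block_device_operations my_dev_ops = {
	.owner			= THIS_MODULE,
	.swap_slot_free_notify	= my_swap_slot_free_notify,
};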
static void rw_swap_page_base(int rw, unsigned long entry, struct page *page, int wait)
{
	unsigned long type, offset;
	struct swap_info_struct * p;
	int zones[PAGE_SIZE/512];
	int zones_used;
	kdev_t dev = 0;
	int block_size;

#ifdef DEBUG_SWAP
	printk ("DebugVM: %s_swap_page entry %08lx, page %p (count %d), %s\n",
		(rw == READ) ? "read" : "write",
		entry, (char *) page_address(page), atomic_read(&page->count),
		wait ? "wait" : "nowait");
#endif

	type = SWP_TYPE(entry);
	if (type >= nr_swapfiles) {
		printk("Internal error: bad swap-device\n");
		return;
	}

	/* Don't allow too many pending pages in flight.. */
	if (atomic_read(&nr_async_pages) > pager_daemon.swap_cluster)
		wait = 1;

	p = &swap_info[type];
	offset = SWP_OFFSET(entry);
	if (offset >= p->max) {
		printk("rw_swap_page: weirdness\n");
		return;
	}
	if (p->swap_map && !p->swap_map[offset]) {
		printk(KERN_ERR "rw_swap_page: "
			"Trying to %s unallocated swap (%08lx)\n",
			(rw == READ) ? "read" : "write", entry);
		return;
	}
	if (!(p->flags & SWP_USED)) {
		printk(KERN_ERR "rw_swap_page: "
			"Trying to swap to unused swap-device\n");
		return;
	}

	if (!PageLocked(page)) {
		printk(KERN_ERR "VM: swap page is unlocked\n");
		return;
	}

	if (PageSwapCache(page)) {
		/* Make sure we are the only process doing I/O with this swap page. */
		if (test_and_set_bit(offset, p->swap_lockmap)) {
			struct wait_queue __wait;

			__wait.task = current;
			add_wait_queue(&lock_queue, &__wait);
			for (;;) {
				current->state = TASK_UNINTERRUPTIBLE;
				mb();
				if (!test_and_set_bit(offset, p->swap_lockmap))
					break;
				run_task_queue(&tq_disk);
				schedule();
			}
			current->state = TASK_RUNNING;
			remove_wait_queue(&lock_queue, &__wait);
		}

		/*
		 * Make sure that we have a swap cache association for this
		 * page.  We need this to find which swap page to unlock once
		 * the swap IO has completed to the physical page.  If the page
		 * is not already in the cache, just overload the offset entry
		 * as if it were: we are not allowed to manipulate the inode
		 * hashing for locked pages.
		 */
		if (page->offset != entry) {
			printk ("swap entry mismatch");
			return;
		}
	}
	if (rw == READ) {
		clear_bit(PG_uptodate, &page->flags);
		kstat.pswpin++;
	} else
		kstat.pswpout++;

	atomic_inc(&page->count);
	if (p->swap_device) {
		zones[0] = offset;
		zones_used = 1;
		dev = p->swap_device;
		block_size = PAGE_SIZE;
	} else if (p->swap_file) {
		struct inode *swapf = p->swap_file->d_inode;
		int i;

		if (swapf->i_op->bmap == NULL
			&& swapf->i_op->smap != NULL){
			/*
				With MS-DOS, we use msdos_smap which returns
				a sector number (not a cluster or block number).
				It is a patch to enable the UMSDOS project.
				Other people are working on better solution.

				It sounds like ll_rw_swap_file defined
				its operation size (sector size) based on
				PAGE_SIZE and the number of blocks to read.
				So using bmap or smap should work even if
				smap will require more blocks.
			*/
			int j;
			unsigned int block = offset << 3;

			for (i=0, j=0; j< PAGE_SIZE ; i++, j += 512){
				if (!(zones[i] = swapf->i_op->smap(swapf,block++))) {
					printk("rw_swap_page: bad swap file\n");
					return;
				}
			}
			block_size = 512;
		} else {
			int j;
			unsigned int block = offset
				<< (PAGE_SHIFT - swapf->i_sb->s_blocksize_bits);

			block_size = swapf->i_sb->s_blocksize;
			for (i=0, j=0; j< PAGE_SIZE ; i++, j += block_size)
				if (!(zones[i] = bmap(swapf,block++))) {
					printk("rw_swap_page: bad swap file\n");
					return;
				}
			zones_used = i;
			dev = swapf->i_dev;
		}
	} else {
		printk(KERN_ERR "rw_swap_page: no swap file or device\n");
		/* Do some cleaning up so if this ever happens we can hopefully
		 * trigger controlled shutdown.
		 */
		if (PageSwapCache(page)) {
			if (!test_and_clear_bit(offset,p->swap_lockmap))
				printk("swap_after_unlock_page: lock already cleared\n");
			wake_up(&lock_queue);
		}
		atomic_dec(&page->count);
		return;
	}
	if (!wait) {
		set_bit(PG_decr_after, &page->flags);
		atomic_inc(&nr_async_pages);
	}
	if (PageSwapCache(page)) {
		/* only lock/unlock swap cache pages! */
		set_bit(PG_swap_unlock_after, &page->flags);
	}
	set_bit(PG_free_after, &page->flags);

	/* block_size == PAGE_SIZE/zones_used */
	brw_page(rw, page, dev, zones, block_size, 0);

	/* Note! For consistency we do all of the logic,
	 * decrementing the page count, and unlocking the page in the
	 * swap lock map - in the IO completion handler.
	 */
	if (!wait)
		return;
	wait_on_page(page);

	/* This shouldn't happen, but check to be sure. */
	if (atomic_read(&page->count) == 0)
		printk(KERN_ERR "rw_swap_page: page unused while waiting!\n");

#ifdef DEBUG_SWAP
	printk ("DebugVM: %s_swap_page finished on page %p (count %d)\n",
		(rw == READ) ? "read" : "write",
		(char *) page_address(page), atomic_read(&page->count));
#endif
}
/*
 * Try to free buffers if "page" has them.
 */
static int
remap_preparepage(struct page *page, int fastmode)
{
	struct address_space *mapping;
	int waitcnt = fastmode ? 0 : 10;

	BUG_ON(!PageLocked(page));

	mapping = page_mapping(page);

	if (PageWriteback(page) && !PagePrivate(page) && !PageSwapCache(page)) {
		printk("remap: mapping %p page %p\n", page->mapping, page);
		return -REMAPPREP_WB;
	}

	if (PageWriteback(page))
		wait_on_page_writeback(page);

	if (PagePrivate(page)) {
#ifdef DEBUG_MSG
		printk("remap: process page with buffers...\n");
#endif
		/* XXX copied from shrink_list() */
		if (PageDirty(page) &&
		    is_page_cache_freeable(page) &&
		    mapping != NULL &&
		    mapping->a_ops->writepage != NULL) {
			spin_lock_irq(&mapping->tree_lock);
			if (clear_page_dirty_for_io(page)) {
				int res;
				struct writeback_control wbc = {
					.sync_mode = WB_SYNC_NONE,
					.nr_to_write = SWAP_CLUSTER_MAX,
					.nonblocking = 1,
					.for_reclaim = 1,
				};

				spin_unlock_irq(&mapping->tree_lock);

				SetPageReclaim(page);
				res = mapping->a_ops->writepage(page, &wbc);

				if (res < 0)
					/* not implemented. help */
					BUG();
				if (res == WRITEPAGE_ACTIVATE) {
					ClearPageReclaim(page);
					return -REMAPPREP_WB;
				}
				if (!PageWriteback(page)) {
					/* synchronous write or broken a_ops? */
					ClearPageReclaim(page);
				}
				lock_page(page);
				if (!PagePrivate(page))
					return 0;
			} else
				spin_unlock_irq(&mapping->tree_lock);
		}

		while (1) {
			if (try_to_release_page(page, GFP_KERNEL))
				break;
			if (!waitcnt)
				return -REMAPPREP_BUFFER;
			msleep(10);
			waitcnt--;
			if (!waitcnt)
				print_buffer(page);
		}
	}
/**
 * reclaim_page - reclaims one page from the inactive_clean list
 * @zone: reclaim a page from this zone
 *
 * The pages on the inactive_clean can be instantly reclaimed.
 * The tests look impressive, but most of the time we'll grab
 * the first page of the list and exit successfully.
 */
struct page * reclaim_page(zone_t * zone)
{
	struct page * page = NULL;
	struct list_head * page_lru;
	int maxscan;

	/*
	 * We only need the pagemap_lru_lock if we don't reclaim the page,
	 * but we have to grab the pagecache_lock before the pagemap_lru_lock
	 * to avoid deadlocks and most of the time we'll succeed anyway.
	 */
	spin_lock(&pagecache_lock);
	spin_lock(&pagemap_lru_lock);
	maxscan = zone->inactive_clean_pages;
	while ((page_lru = zone->inactive_clean_list.prev) !=
			&zone->inactive_clean_list && maxscan--) {
		page = list_entry(page_lru, struct page, lru);

		/* Wrong page on list?! (list corruption, should not happen) */
		if (!PageInactiveClean(page)) {
			printk("VM: reclaim_page, wrong page on list.\n");
			list_del(page_lru);
			page->zone->inactive_clean_pages--;
			continue;
		}

		/* Page is or was in use?  Move it to the active list. */
		if (PageTestandClearReferenced(page) || page->age > 0 ||
				(!page->buffers && page_count(page) > 1)) {
			del_page_from_inactive_clean_list(page);
			add_page_to_active_list(page);
			continue;
		}

		/* The page is dirty, or locked, move to inactive_dirty list. */
		if (page->buffers || PageDirty(page) || TryLockPage(page)) {
			del_page_from_inactive_clean_list(page);
			add_page_to_inactive_dirty_list(page);
			continue;
		}

		/* OK, remove the page from the caches. */
		if (PageSwapCache(page)) {
			__delete_from_swap_cache(page);
			goto found_page;
		}

		if (page->mapping) {
			__remove_inode_page(page);
			goto found_page;
		}

		/* We should never ever get here. */
		printk(KERN_ERR "VM: reclaim_page, found unknown page\n");
		list_del(page_lru);
		zone->inactive_clean_pages--;
		UnlockPage(page);
	}
	/* Reset page pointer, maybe we encountered an unfreeable page. */
	page = NULL;
	goto out;

found_page:
	del_page_from_inactive_clean_list(page);
	UnlockPage(page);
	page->age = PAGE_AGE_START;
	if (page_count(page) != 1)
		printk("VM: reclaim_page, found page with count %d!\n",
				page_count(page));
out:
	spin_unlock(&pagemap_lru_lock);
	spin_unlock(&pagecache_lock);
	memory_pressure++;
	return page;
}
/*
 * The swap-out functions return 1 if they successfully
 * threw something out, and we got a free page. It returns
 * zero if it couldn't do anything, and any other value
 * indicates it decreased rss, but the page was shared.
 *
 * NOTE! If it sleeps, it *must* return 1 to make sure we
 * don't continue with the swap-out. Otherwise we may be
 * using a process that no longer actually exists (it might
 * have died while we slept).
 */
static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma,
	unsigned long address, pte_t * page_table, int gfp_mask)
{
	pte_t pte;
	swp_entry_t entry;
	struct page * page;
	int onlist;

	pte = *page_table;
	if (!pte_present(pte))
		goto out_failed;
	page = pte_page(pte);
	if ((!VALID_PAGE(page)) || PageReserved(page))
		goto out_failed;

	if (mm->swap_cnt)
		mm->swap_cnt--;

	onlist = PageActive(page);
	/* Don't look at this pte if it's been accessed recently. */
	if (ptep_test_and_clear_young(page_table)) {
		age_page_up(page);
		goto out_failed;
	}
	if (!onlist)
		/* The page is still mapped, so it can't be freeable... */
		age_page_down_ageonly(page);

	/*
	 * If the page is in active use by us, or if the page
	 * is in active use by others, don't unmap it or
	 * (worse) start unneeded IO.
	 */
	if (page->age > 0)
		goto out_failed;

	if (TryLockPage(page))
		goto out_failed;

	/* From this point on, the odds are that we're going to
	 * nuke this pte, so read and clear the pte.  This hook
	 * is needed on CPUs which update the accessed and dirty
	 * bits in hardware.
	 */
	pte = ptep_get_and_clear(page_table);

	/*
	 * Is the page already in the swap cache? If so, then
	 * we can just drop our reference to it without doing
	 * any IO - it's already up-to-date on disk.
	 *
	 * Return 0, as we didn't actually free any real
	 * memory, and we should just continue our scan.
	 */
	if (PageSwapCache(page)) {
		entry.val = page->index;
		if (pte_dirty(pte))
			set_page_dirty(page);
set_swap_pte:
		swap_duplicate(entry);
		set_pte(page_table, swp_entry_to_pte(entry));
drop_pte:
		UnlockPage(page);
		mm->rss--;
		flush_tlb_page(vma, address);
		deactivate_page(page);
		page_cache_release(page);
out_failed:
		return 0;
	}

	/*
	 * Is it a clean page? Then it must be recoverable
	 * by just paging it in again, and we can just drop
	 * it..
	 *
	 * However, this won't actually free any real
	 * memory, as the page will just be in the page cache
	 * somewhere, and as such we should just continue
	 * our scan.
	 *
	 * Basically, this just makes it possible for us to do
	 * some real work in the future in "refill_inactive()".
	 */
	flush_cache_page(vma, address);
	if (!pte_dirty(pte))
		goto drop_pte;

	/*
	 * Ok, it's really dirty. That means that
	 * we should either create a new swap cache
	 * entry for it, or we should write it back
	 * to its own backing store.
	 */
	if (page->mapping) {
		set_page_dirty(page);
		goto drop_pte;
	}

	/*
	 * This is a dirty, swappable page.  First of all,
	 * get a suitable swap entry for it, and make sure
	 * we have the swap cache set up to associate the
	 * page with that swap entry.
	 */
	entry = get_swap_page();
	if (!entry.val)
		goto out_unlock_restore; /* No swap space left */

	/* Add it to the swap cache and mark it dirty */
	add_to_swap_cache(page, entry);
	set_page_dirty(page);
	goto set_swap_pte;

out_unlock_restore:
	set_pte(page_table, pte);
	UnlockPage(page);
	return 0;
}
static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask, int priority)
{
	struct list_head * entry;
	int max_scan = nr_inactive_pages / priority;
	int max_mapped = min((nr_pages << (10 - priority)), max_scan / 10);

	spin_lock(&pagemap_lru_lock);
	while (--max_scan >= 0 && (entry = inactive_list.prev) != &inactive_list) {
		struct page * page;

		/* lock depth is 1 or 2 */
		if (unlikely(current->need_resched)) {
			spin_unlock(&pagemap_lru_lock);
			__set_current_state(TASK_RUNNING);
			schedule();
			spin_lock(&pagemap_lru_lock);
			continue;
		}

		page = list_entry(entry, struct page, lru);

		if (unlikely(!PageLRU(page)))
			BUG();
		if (unlikely(PageActive(page)))
			BUG();

		list_del(entry);
		list_add(entry, &inactive_list);

		/*
		 * Zero page counts can happen because we unlink the pages
		 * _after_ decrementing the usage count..
		 */
		if (unlikely(!page_count(page)))
			continue;

		if (!memclass(page->zone, classzone))
			continue;

		/* Racy check to avoid trylocking when not worthwhile */
		if (!page->buffers && (page_count(page) != 1 || !page->mapping))
			goto page_mapped;

		/*
		 * The page is locked. IO in progress?
		 * Move it to the back of the list.
		 */
		if (unlikely(TryLockPage(page))) {
			if (PageLaunder(page) && (gfp_mask & __GFP_FS)) {
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);
				wait_on_page(page);
				page_cache_release(page);
				spin_lock(&pagemap_lru_lock);
			}
			continue;
		}

		if ((PageDirty(page) || DelallocPage(page)) &&
				is_page_cache_freeable(page) && page->mapping) {
			/*
			 * It is not critical here to write it only if
			 * the page is unmapped because any direct writer
			 * like O_DIRECT would set the PG_dirty bitflag
			 * on the physical page after having successfully
			 * pinned it and after the I/O to the page is finished,
			 * so the direct writes to the page cannot get lost.
			 */
			int (*writepage)(struct page *);

			writepage = page->mapping->a_ops->writepage;
			if ((gfp_mask & __GFP_FS) && writepage) {
				ClearPageDirty(page);
				SetPageLaunder(page);
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);

				writepage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 */
		if (page->buffers) {
			spin_unlock(&pagemap_lru_lock);

			/* avoid freeing a locked page */
			page_cache_get(page);

			if (try_to_release_page(page, gfp_mask)) {
				if (!page->mapping) {
					/*
					 * We must not allow an anon page
					 * with no buffers to be visible on
					 * the LRU, so we unlock the page after
					 * taking the lru lock
					 */
					spin_lock(&pagemap_lru_lock);
					UnlockPage(page);
					__lru_cache_del(page);

					/* effectively free the page here */
					page_cache_release(page);

					if (--nr_pages)
						continue;
					break;
				} else {
					/*
					 * The page is still in pagecache so undo the stuff
					 * before the try_to_release_page since we've not
					 * finished and we can now try the next step.
					 */
					page_cache_release(page);

					spin_lock(&pagemap_lru_lock);
				}
			} else {
				/* failed to drop the buffers so stop here */
				UnlockPage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		spin_lock(&pagecache_lock);

		/*
		 * this is the non-racy check for busy page.
		 */
		if (!page->mapping || !is_page_cache_freeable(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
page_mapped:
			if (--max_mapped >= 0)
				continue;

			/*
			 * Alert! We've found too many mapped pages on the
			 * inactive list, so we start swapping out now!
			 */
			spin_unlock(&pagemap_lru_lock);
			swap_out(priority, gfp_mask, classzone);
			return nr_pages;
		}

		/*
		 * It is critical to check PageDirty _after_ we made sure
		 * the page is freeable, so it is not in use by anybody.
		 */
		if (PageDirty(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
			continue;
		}

		/* point of no return */
		if (likely(!PageSwapCache(page))) {
			__remove_inode_page(page);
			spin_unlock(&pagecache_lock);
		} else {
			swp_entry_t swap;
			swap.val = page->index;
			__delete_from_swap_cache(page);
			spin_unlock(&pagecache_lock);
			swap_free(swap);
		}

		__lru_cache_del(page);
		UnlockPage(page);

		/* effectively free the page here */
		page_cache_release(page);

		if (--nr_pages)
			continue;
		break;
	}
	spin_unlock(&pagemap_lru_lock);

	return nr_pages;
}
/* mm->page_table_lock is held. mmap_sem is not held */
static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma,
	unsigned long address, pte_t * page_table, struct page *page, zone_t * classzone)
{
	pte_t pte;
	swp_entry_t entry;

	/* Don't look at this pte if it's been accessed recently. */
	if ((vma->vm_flags & VM_LOCKED) || ptep_test_and_clear_young(page_table)) {
		mark_page_accessed(page);
		return 0;
	}

	/* Don't bother unmapping pages that are active */
	if (PageActive(page))
		return 0;

	/* Don't bother replenishing zones not under pressure.. */
	if (!memclass(page->zone, classzone))
		return 0;

	if (TryLockPage(page))
		return 0;

	/* From this point on, the odds are that we're going to
	 * nuke this pte, so read and clear the pte.  This hook
	 * is needed on CPUs which update the accessed and dirty
	 * bits in hardware.
	 */
	flush_cache_page(vma, address);
	pte = ptep_get_and_clear(page_table);
	flush_tlb_page(vma, address);

	if (pte_dirty(pte))
		set_page_dirty(page);

	/*
	 * Is the page already in the swap cache? If so, then
	 * we can just drop our reference to it without doing
	 * any IO - it's already up-to-date on disk.
	 */
	if (PageSwapCache(page)) {
		entry.val = page->index;
		swap_duplicate(entry);
set_swap_pte:
		set_pte(page_table, swp_entry_to_pte(entry));
drop_pte:
		mm->rss--;
		UnlockPage(page);
		{
			int freeable = page_count(page) - !!page->buffers <= 2;
			page_cache_release(page);
			return freeable;
		}
	}

	/*
	 * Is it a clean page? Then it must be recoverable
	 * by just paging it in again, and we can just drop
	 * it..  or if it's dirty but has backing store,
	 * just mark the page dirty and drop it.
	 *
	 * However, this won't actually free any real
	 * memory, as the page will just be in the page cache
	 * somewhere, and as such we should just continue
	 * our scan.
	 *
	 * Basically, this just makes it possible for us to do
	 * some real work in the future in "refill_inactive()".
	 */
	if (page->mapping)
		goto drop_pte;
	if (!PageDirty(page))
		goto drop_pte;

	/*
	 * Anonymous buffercache pages can be left behind by
	 * concurrent truncate and pagefault.
	 */
	if (page->buffers)
		goto preserve;

	/*
	 * This is a dirty, swappable page.  First of all,
	 * get a suitable swap entry for it, and make sure
	 * we have the swap cache set up to associate the
	 * page with that swap entry.
	 */
	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			break;
		/* Add it to the swap cache and mark it dirty
		 * (adding to the page cache will clear the dirty
		 * and uptodate bits, so we need to do it again)
		 */
		if (add_to_swap_cache(page, entry) == 0) {
			SetPageUptodate(page);
			set_page_dirty(page);
			goto set_swap_pte;
		}
		/* Raced with "speculative" read_swap_cache_async */
		swap_free(entry);
	}

	/* No swap space left */
preserve:
	set_pte(page_table, pte);
	UnlockPage(page);
	return 0;
}
static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask, int * failed_swapout)
{
	struct list_head * entry;
	int max_scan = (classzone->nr_inactive_pages +
			classzone->nr_active_pages) / vm_cache_scan_ratio;
	int max_mapped = vm_mapped_ratio * nr_pages;

	while (max_scan && classzone->nr_inactive_pages &&
			(entry = inactive_list.prev) != &inactive_list) {
		struct page * page;

		if (unlikely(current->need_resched)) {
			spin_unlock(&pagemap_lru_lock);
			__set_current_state(TASK_RUNNING);
			schedule();
			spin_lock(&pagemap_lru_lock);
			continue;
		}

		page = list_entry(entry, struct page, lru);

		BUG_ON(!PageLRU(page));
		BUG_ON(PageActive(page));

		list_del(entry);
		list_add(entry, &inactive_list);

		/*
		 * Zero page counts can happen because we unlink the pages
		 * _after_ decrementing the usage count..
		 */
		if (unlikely(!page_count(page)))
			continue;

		if (!memclass(page_zone(page), classzone))
			continue;

		max_scan--;

		/* Racy check to avoid trylocking when not worthwhile */
		if (!page->buffers && (page_count(page) != 1 || !page->mapping))
			goto page_mapped;

		/*
		 * The page is locked. IO in progress?
		 * Move it to the back of the list.
		 */
		if (unlikely(TryLockPage(page))) {
			if (PageLaunder(page) && (gfp_mask & __GFP_FS)) {
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);
				wait_on_page(page);
				page_cache_release(page);
				spin_lock(&pagemap_lru_lock);
			}
			continue;
		}

		if (PageDirty(page) && is_page_cache_freeable(page) && page->mapping) {
			/*
			 * It is not critical here to write it only if
			 * the page is unmapped because any direct writer
			 * like O_DIRECT would set the PG_dirty bitflag
			 * on the physical page after having successfully
			 * pinned it and after the I/O to the page is finished,
			 * so the direct writes to the page cannot get lost.
			 */
			int (*writepage)(struct page *);

			writepage = page->mapping->a_ops->writepage;
			if ((gfp_mask & __GFP_FS) && writepage) {
				ClearPageDirty(page);
				SetPageLaunder(page);
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);

				writepage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 */
		if (page->buffers) {
			spin_unlock(&pagemap_lru_lock);

			/* avoid freeing a locked page */
			page_cache_get(page);

			if (try_to_release_page(page, gfp_mask)) {
				if (!page->mapping) {
					/*
					 * We must not allow an anon page
					 * with no buffers to be visible on
					 * the LRU, so we unlock the page after
					 * taking the lru lock
					 */
					spin_lock(&pagemap_lru_lock);
					UnlockPage(page);
					__lru_cache_del(page);

					/* effectively free the page here */
					page_cache_release(page);

					if (--nr_pages)
						continue;
					break;
				} else {
					/*
					 * The page is still in pagecache so undo the stuff
					 * before the try_to_release_page since we've not
					 * finished and we can now try the next step.
					 */
					page_cache_release(page);

					spin_lock(&pagemap_lru_lock);
				}
			} else {
				/* failed to drop the buffers so stop here */
				UnlockPage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		spin_lock(&pagecache_lock);

		/*
		 * This is the non-racy check for busy page.
		 * It is critical to check PageDirty _after_ we made sure
		 * the page is freeable so not in use by anybody.
		 * At this point we're guaranteed that page->buffers is NULL,
		 * nobody can refill page->buffers under us because we still
		 * hold the page lock.
		 */
		if (!page->mapping || page_count(page) > 1) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
page_mapped:
			if (--max_mapped < 0) {
				spin_unlock(&pagemap_lru_lock);

				nr_pages -= kmem_cache_reap(gfp_mask);
				if (nr_pages <= 0)
					goto out;

				shrink_dcache_memory(vm_vfs_scan_ratio, gfp_mask);
				shrink_icache_memory(vm_vfs_scan_ratio, gfp_mask);
#ifdef CONFIG_QUOTA
				shrink_dqcache_memory(vm_vfs_scan_ratio, gfp_mask);
#endif

				if (!*failed_swapout)
					*failed_swapout = !swap_out(classzone);

				max_mapped = nr_pages * vm_mapped_ratio;

				spin_lock(&pagemap_lru_lock);
				refill_inactive(nr_pages, classzone);
			}
			continue;
		}

		if (PageDirty(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
			continue;
		}

		__lru_cache_del(page);

		/* point of no return */
		if (likely(!PageSwapCache(page))) {
			__remove_inode_page(page);
			spin_unlock(&pagecache_lock);
		} else {
			swp_entry_t swap;
			swap.val = page->index;
			__delete_from_swap_cache(page);
			spin_unlock(&pagecache_lock);
			swap_free(swap);
		}

		UnlockPage(page);

		/* effectively free the page here */
		page_cache_release(page);

		if (--nr_pages)
			continue;
		break;
	}
	spin_unlock(&pagemap_lru_lock);

out:
	return nr_pages;
}
			page_cache_release(page);
		}
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t ent = {.val = page_private(page)};

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
	mem_cgroup_uncharge_swapcache(page, ent);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @gfp_mask: memory allocation flags
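 */

/*
 * The body of add_to_swap() is cut off in this excerpt.  A sketch of
 * how it looked in kernels of this vintage: allocate a swap entry and
 * retry on a raced swap-cache insertion.  The extra GFP bits and the
 * use of swap_free() for cleanup are assumptions based on that era
 * (the exact free helper varies across versions), not this source.
 */
int add_to_swap(struct page *page, gfp_t gfp_mask)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Keep radix-tree node allocations from eating into
		 * emergency reserves while we are already reclaiming.
		 */
		err = add_to_swap_cache(page, entry,
				gfp_mask | __GFP_NOMEMALLOC | __GFP_NOWARN);
		switch (err) {
		case 0:				/* Success */
			SetPageDirty(page);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}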
void show_mem(void)
{
	int i,free = 0,total = 0,reserved = 0;
	int shared = 0, cached = 0;
	struct task_struct *p;
	int highmem = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageHighMem(mem_map+i))
			highmem++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (!page_count(mem_map+i))
			free++;
		else
			shared += atomic_read(&mem_map[i].count) - 1;
	}
	printk("%d pages of RAM\n",total);
	printk("%d pages of HIGHMEM\n", highmem);
	printk("%d free pages\n",free);
	printk("%d reserved pages\n",reserved);
	printk("%d pages shared\n",shared);
	printk("%d pages swap cached\n",cached);
	printk("%d pages in page table cache\n",(int)pgtable_cache_size);
	show_buffers();
	printk("%-8s %3s %8s %8s %8s %9s %8s", "Process", "Pid",
	       "Ctx", "Ctx<<4", "Last Sys", "pc", "task");
#ifdef CONFIG_SMP
	printk(" %3s", "CPU");
#endif /* CONFIG_SMP */
	printk("\n");
	for_each_task(p)
	{
		printk("%-8.8s %3d %8ld %8ld %8ld %c%08lx %08lx ",
		       p->comm,p->pid,
		       (p->mm)?p->mm->context:0,
		       (p->mm)?(p->mm->context<<4):0,
		       p->thread.last_syscall,
		       (p->thread.regs)?user_mode(p->thread.regs) ? 'u' : 'k' : '?',
		       (p->thread.regs)?p->thread.regs->nip:0,
		       (ulong)p);
		{
			int iscur = 0;
#ifdef CONFIG_SMP
			printk("%3d ", p->processor);
			if ( (p->processor != NO_PROC_ID) &&
			     (p == current_set[p->processor]) )
			{
				iscur = 1;
				printk("current");
			}
#else
			if ( p == current )
			{
				iscur = 1;
				printk("current");
			}

			if ( p == last_task_used_math )
			{
				if ( iscur )
					printk(",");
				printk("last math");
			}
#endif /* CONFIG_SMP */
			printk("\n");
		}
	}
}