/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
void free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct * p;
	struct page *page = NULL;

	if (is_migration_entry(entry))
		return;

	p = swap_info_get(entry);
	if (p) {
		if (swap_entry_free(p, swp_offset(entry)) == 1) {
			page = find_get_page(&swapper_space, entry.val);
			if (page && unlikely(TestSetPageLocked(page))) {
				page_cache_release(page);
				page = NULL;
			}
		}
		spin_unlock(&swap_lock);
	}
	if (page) {
		int one_user;

		BUG_ON(PagePrivate(page));
		one_user = (page_count(page) == 2);
		/* Only cache user (+us), or swap space full? Free it! */
		/* Also recheck PageSwapCache after page is locked (above) */
		if (PageSwapCache(page) && !PageWriteback(page) &&
					(one_user || vm_swap_full())) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
		unlock_page(page);
		page_cache_release(page);
	}
}
/*
 * Trying to stop swapping from a file is fraught with races, so
 * we repeat quite a bit here when we have to pause. swapoff()
 * isn't exactly timing-critical, so who cares (but this is /really/
 * inefficient, ugh).
 *
 * We return 1 after having slept, which makes the process start over
 * from the beginning for this process..
 */
static inline int unuse_pte(struct vm_area_struct * vma, unsigned long address,
	pte_t *dir, unsigned int type, unsigned long page)
{
	pte_t pte = *dir;

	if (pte_none(pte))
		return 0;
	if (pte_present(pte)) {
		unsigned long page_nr = MAP_NR(pte_page(pte));
		if (page_nr >= MAP_NR(high_memory))
			return 0;
		if (!in_swap_cache(page_nr))
			return 0;
		if (SWP_TYPE(in_swap_cache(page_nr)) != type)
			return 0;
		delete_from_swap_cache(page_nr);
		set_pte(dir, pte_mkdirty(pte));
		return 0;
	}
	if (SWP_TYPE(pte_val(pte)) != type)
		return 0;
	read_swap_page(pte_val(pte), (char *) page);
#if 0 /* Is this really needed here, hasn't it been solved elsewhere? */
	flush_page_to_ram(page);
#endif
	if (pte_val(*dir) != pte_val(pte)) {
		free_page(page);
		return 1;
	}
	set_pte(dir, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
	flush_tlb_page(vma, address);
	++vma->vm_mm->rss;
	swap_free(pte_val(pte));
	return 1;
}
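/*
 * Aside: unuse_pte() above keys off SWP_TYPE(pte_val(pte)) to decide
 * whether a non-present pte refers to the swap area being turned off.
 * The stand-alone sketch below illustrates that packing idea.  The
 * DEMO_* macros and the exact bit positions are illustrative
 * assumptions (loosely modelled on the classic i386 layout: present
 * bit kept zero, type in the next few bits, offset above that), not
 * the kernel's actual, architecture-specific definitions.
 */
#include <assert.h>

#define DEMO_SWP_TYPE(entry)		(((entry) >> 1) & 0x3f)
#define DEMO_SWP_OFFSET(entry)		((entry) >> 8)
#define DEMO_SWP_ENTRY(type, offset)	(((type) << 1) | ((offset) << 8))

int main(void)
{
	unsigned long e = DEMO_SWP_ENTRY(3UL, 0x1234UL);

	assert(DEMO_SWP_TYPE(e) == 3);		/* which swap area */
	assert(DEMO_SWP_OFFSET(e) == 0x1234);	/* page slot within that area */
	assert((e & 1) == 0);			/* pte "present" bit stays clear */
	return 0;
}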
static inline void copy_one_pte(pte_t * old_pte, pte_t * new_pte, int cow)
{
	pte_t pte = *old_pte;
	unsigned long page_nr;

	if (pte_none(pte))
		return;
	if (!pte_present(pte)) {
		swap_duplicate(pte_val(pte));
		set_pte(new_pte, pte);
		return;
	}
	page_nr = MAP_NR(pte_page(pte));
	if (page_nr >= MAP_NR(high_memory) ||
	    PageReserved(mem_map+page_nr)) {
		set_pte(new_pte, pte);
		return;
	}
	if (cow)
		pte = pte_wrprotect(pte);
	if (delete_from_swap_cache(page_nr))
		pte = pte_mkdirty(pte);
	set_pte(new_pte, pte_mkold(pte));
	set_pte(old_pte, pte);
	mem_map[page_nr].count++;
}
/*
 * Strange swizzling function for shmem_getpage (and shmem_unuse)
 */
int move_from_swap_cache(struct page *page, unsigned long index,
		struct address_space *mapping)
{
	int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
	if (!err) {
		delete_from_swap_cache(page);
		/* shift page from clean_pages to dirty_pages list */
		ClearPageDirty(page);
		set_page_dirty(page);
	}
	return err;
}
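/*
 * Aside: a toy model of why the ClearPageDirty()/set_page_dirty() pair
 * above is needed.  The assumption (consistent with 2.4-era behaviour)
 * is that set_page_dirty() only moves a page onto its mapping's dirty
 * list on a 0 -> 1 transition of the dirty bit, so the bit must be
 * cleared first to force the move onto the *new* mapping's list.  All
 * demo_* names below are hypothetical, not kernel structures.
 */
#include <assert.h>

enum demo_list { DEMO_CLEAN_LIST, DEMO_DIRTY_LIST };

struct demo_page {
	int dirty;			/* stand-in for PG_dirty */
	enum demo_list list;		/* which per-mapping list we sit on */
};

/* Only act when the dirty bit makes a 0 -> 1 transition. */
static void demo_set_page_dirty(struct demo_page *page)
{
	if (!page->dirty) {
		page->dirty = 1;
		page->list = DEMO_DIRTY_LIST;
	}
}

int main(void)
{
	struct demo_page page = { .dirty = 1, .list = DEMO_CLEAN_LIST };

	/* Without clearing first, the list move would be skipped: */
	demo_set_page_dirty(&page);
	assert(page.list == DEMO_CLEAN_LIST);

	/* The ClearPageDirty()/set_page_dirty() pair forces the move: */
	page.dirty = 0;
	demo_set_page_dirty(&page);
	assert(page.list == DEMO_DIRTY_LIST);
	return 0;
}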
void free_page_and_swap_cache(unsigned long addr)
{
	struct page *page = mem_map + MAP_NR(addr);

	/*
	 * If we are the only user, then free up the swap cache.
	 */
	if (PageSwapCache(page) && !is_page_shared(page)) {
		delete_from_swap_cache(page);
	}
	__free_page(page);
}
/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
void free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct * p;
	struct page *page = NULL;

	p = swap_info_get(entry);
	if (p) {
		if (swap_entry_free(p, SWP_OFFSET(entry)) == 1)
			page = find_trylock_page(&swapper_space, entry.val);
		swap_info_put(p);
	}
	if (page) {
		page_cache_get(page);
		/* Only cache user (+us), or swap space full? Free it! */
		if (page_count(page) - !!page->buffers == 2 || vm_swap_full()) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
		UnlockPage(page);
		page_cache_release(page);
	}
}
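/*
 * Aside: a hedged sketch of the typical caller.  When a page-table
 * range is torn down and a non-present pte turns out to hold a swap
 * entry, the entry's reference is dropped through free_swap_and_cache().
 * This is a simplified illustration of that pattern (the demo_ helper
 * is hypothetical; real callers such as zap_pte_range() also handle
 * locking, pte_file() ptes and clearing the pte itself).
 */
static void demo_drop_swap_pte(pte_t pte)
{
	/* A non-present, non-empty pte holds a packed swap entry. */
	if (!pte_none(pte) && !pte_present(pte))
		free_swap_and_cache(pte_to_swp_entry(pte));
}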
/*
 * We completely avoid races by reading each swap page in advance,
 * and then search for the process using it.  All the necessary
 * page table adjustments can then be made atomically.
 */
static int try_to_unuse(unsigned int type)
{
	struct swap_info_struct * si = &swap_info[type];
	struct mm_struct *start_mm;
	unsigned short *swap_map;
	unsigned short swcount;
	struct page *page;
	swp_entry_t entry;
	int i = 0;
	int retval = 0;
	int reset_overflow = 0;

	/*
	 * When searching mms for an entry, a good strategy is to
	 * start at the first mm we freed the previous entry from
	 * (though actually we don't notice whether we or coincidence
	 * freed the entry).  Initialize this start_mm with a hold.
	 *
	 * A simpler strategy would be to start at the last mm we
	 * freed the previous entry from; but that would take less
	 * advantage of mmlist ordering (now preserved by swap_out()),
	 * which clusters forked address spaces together, most recent
	 * child immediately after parent.  If we race with dup_mmap(),
	 * we very much want to resolve parent before child, otherwise
	 * we may miss some entries: using last mm would invert that.
	 */
	start_mm = &init_mm;
	atomic_inc(&init_mm.mm_users);

	/*
	 * Keep on scanning until all entries have gone.  Usually,
	 * one pass through swap_map is enough, but not necessarily:
	 * mmput() removes mm from mmlist before exit_mmap() and its
	 * zap_page_range().  That's not too bad, those entries are
	 * on their way out, and handled faster there than here.
	 * do_munmap() behaves similarly, taking the range out of mm's
	 * vma list before zap_page_range().  But unfortunately, when
	 * unmapping a part of a vma, it takes the whole out first,
	 * then reinserts what's left after (might even reschedule if
	 * open() method called) - so swap entries may be invisible
	 * to swapoff for a while, then reappear - but that is rare.
	 */
	while ((i = find_next_to_unuse(si, i))) {
		/*
		 * Get a page for the entry, using the existing swap
		 * cache page if there is one.  Otherwise, get a clean
		 * page and read the swap into it.
		 */
		swap_map = &si->swap_map[i];
		entry = SWP_ENTRY(type, i);
		page = read_swap_cache_async(entry);
		if (!page) {
			/*
			 * Either swap_duplicate() failed because entry
			 * has been freed independently, and will not be
			 * reused since sys_swapoff() already disabled
			 * allocation from here, or alloc_page() failed.
			 */
			if (!*swap_map)
				continue;
			retval = -ENOMEM;
			break;
		}

		/*
		 * Don't hold on to start_mm if it looks like exiting.
		 */
		if (atomic_read(&start_mm->mm_users) == 1) {
			mmput(start_mm);
			start_mm = &init_mm;
			atomic_inc(&init_mm.mm_users);
		}

		/*
		 * Wait for and lock page.  When do_swap_page races with
		 * try_to_unuse, do_swap_page can handle the fault much
		 * faster than try_to_unuse can locate the entry.  This
		 * apparently redundant "wait_on_page" lets try_to_unuse
		 * defer to do_swap_page in such a case - in some tests,
		 * do_swap_page and try_to_unuse repeatedly compete.
		 */
		wait_on_page(page);
		lock_page(page);

		/*
		 * Remove all references to entry, without blocking.
		 * Whenever we reach init_mm, there's no address space
		 * to search, but use it as a reminder to search shmem.
		 */
		swcount = *swap_map;
		if (swcount > 1) {
			flush_page_to_ram(page);
			if (start_mm == &init_mm)
				shmem_unuse(entry, page);
			else
				unuse_process(start_mm, entry, page);
		}
		if (*swap_map > 1) {
			int set_start_mm = (*swap_map >= swcount);
			struct list_head *p = &start_mm->mmlist;
			struct mm_struct *new_start_mm = start_mm;
			struct mm_struct *mm;

			spin_lock(&mmlist_lock);
			while (*swap_map > 1 &&
					(p = p->next) != &start_mm->mmlist) {
				mm = list_entry(p, struct mm_struct, mmlist);
				swcount = *swap_map;
				if (mm == &init_mm) {
					set_start_mm = 1;
					shmem_unuse(entry, page);
				} else
					unuse_process(mm, entry, page);
				if (set_start_mm && *swap_map < swcount) {
					new_start_mm = mm;
					set_start_mm = 0;
				}
			}
			atomic_inc(&new_start_mm->mm_users);
			spin_unlock(&mmlist_lock);
			mmput(start_mm);
			start_mm = new_start_mm;
		}

		/*
		 * How could swap count reach 0x7fff when the maximum
		 * pid is 0x7fff, and there's no way to repeat a swap
		 * page within an mm (except in shmem, where it's the
		 * shared object which takes the reference count)?
		 * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
		 *
		 * If that's wrong, then we should worry more about
		 * exit_mmap() and do_munmap() cases described above:
		 * we might be resetting SWAP_MAP_MAX too early here.
		 * We know "Undead"s can happen, they're okay, so don't
		 * report them; but do report if we reset SWAP_MAP_MAX.
		 */
		if (*swap_map == SWAP_MAP_MAX) {
			swap_list_lock();
			swap_device_lock(si);
			nr_swap_pages++;
			*swap_map = 1;
			swap_device_unlock(si);
			swap_list_unlock();
			reset_overflow = 1;
		}

		/*
		 * If a reference remains (rare), we would like to leave
		 * the page in the swap cache; but try_to_swap_out could
		 * then re-duplicate the entry once we drop page lock,
		 * so we might loop indefinitely; also, that page could
		 * not be swapped out to other storage meanwhile.  So:
		 * delete from cache even if there's another reference,
		 * after ensuring that the data has been saved to disk -
		 * since if the reference remains (rarer), it will be
		 * read from disk into another page.  Splitting into two
		 * pages would be incorrect if swap supported "shared
		 * private" pages, but they are handled by tmpfs files.
		 * Note shmem_unuse already deleted its page from swap cache.
		 */
		if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
			rw_swap_page(WRITE, page);
			lock_page(page);
		}
		if (PageSwapCache(page))
			delete_from_swap_cache(page);

		/*
		 * So we could skip searching mms once swap count went
		 * to 1, we did not mark any present ptes as dirty: must
		 * mark page dirty so try_to_swap_out will preserve it.
		 */
		SetPageDirty(page);
		UnlockPage(page);
		page_cache_release(page);

		/*
		 * Make sure that we aren't completely killing
		 * interactive performance.  Interruptible check on
		 * signal_pending() would be nice, but changes the spec?
		 */
		if (current->need_resched)
			schedule();
	}
/*
 * We completely avoid races by reading each swap page in advance,
 * and then search for the process using it.  All the necessary
 * page table adjustments can then be made atomically.
 */
static int try_to_unuse(unsigned int type)
{
	struct swap_info_struct * si = &swap_info[type];
	struct mm_struct *start_mm;
	unsigned short *swap_map;
	unsigned short swcount;
	struct page *page;
	swp_entry_t entry;
	unsigned int i = 0;
	int retval = 0;
	int reset_overflow = 0;
	int shmem;

	/*
	 * When searching mms for an entry, a good strategy is to
	 * start at the first mm we freed the previous entry from
	 * (though actually we don't notice whether we or coincidence
	 * freed the entry).  Initialize this start_mm with a hold.
	 *
	 * A simpler strategy would be to start at the last mm we
	 * freed the previous entry from; but that would take less
	 * advantage of mmlist ordering, which clusters forked mms
	 * together, child after parent.  If we race with dup_mmap(), we
	 * prefer to resolve parent before child, lest we miss entries
	 * duplicated after we scanned child: using last mm would invert
	 * that.  Though it's only a serious concern when an overflowed
	 * swap count is reset from SWAP_MAP_MAX, preventing a rescan.
	 */
	start_mm = &init_mm;
	atomic_inc(&init_mm.mm_users);

	/*
	 * Keep on scanning until all entries have gone.  Usually,
	 * one pass through swap_map is enough, but not necessarily:
	 * there are races when an instance of an entry might be missed.
	 */
	while ((i = find_next_to_unuse(si, i)) != 0) {
		if (signal_pending(current)) {
			retval = -EINTR;
			break;
		}

		/*
		 * Get a page for the entry, using the existing swap
		 * cache page if there is one.  Otherwise, get a clean
		 * page and read the swap into it.
		 */
		swap_map = &si->swap_map[i];
		entry = swp_entry(type, i);
		page = read_swap_cache_async(entry, NULL, 0);
		if (!page) {
			/*
			 * Either swap_duplicate() failed because entry
			 * has been freed independently, and will not be
			 * reused since sys_swapoff() already disabled
			 * allocation from here, or alloc_page() failed.
			 */
			if (!*swap_map)
				continue;
			retval = -ENOMEM;
			break;
		}

		/*
		 * Don't hold on to start_mm if it looks like exiting.
		 */
		if (atomic_read(&start_mm->mm_users) == 1) {
			mmput(start_mm);
			start_mm = &init_mm;
			atomic_inc(&init_mm.mm_users);
		}

		/*
		 * Wait for and lock page.  When do_swap_page races with
		 * try_to_unuse, do_swap_page can handle the fault much
		 * faster than try_to_unuse can locate the entry.  This
		 * apparently redundant "wait_on_page_locked" lets try_to_unuse
		 * defer to do_swap_page in such a case - in some tests,
		 * do_swap_page and try_to_unuse repeatedly compete.
		 */
		wait_on_page_locked(page);
		wait_on_page_writeback(page);
		lock_page(page);
		wait_on_page_writeback(page);

		/*
		 * Remove all references to entry.
		 * Whenever we reach init_mm, there's no address space
		 * to search, but use it as a reminder to search shmem.
		 */
		shmem = 0;
		swcount = *swap_map;
		if (swcount > 1) {
			if (start_mm == &init_mm)
				shmem = shmem_unuse(entry, page);
			else
				retval = unuse_mm(start_mm, entry, page);
		}
		if (*swap_map > 1) {
			int set_start_mm = (*swap_map >= swcount);
			struct list_head *p = &start_mm->mmlist;
			struct mm_struct *new_start_mm = start_mm;
			struct mm_struct *prev_mm = start_mm;
			struct mm_struct *mm;

			atomic_inc(&new_start_mm->mm_users);
			atomic_inc(&prev_mm->mm_users);
			spin_lock(&mmlist_lock);
			while (*swap_map > 1 && !retval &&
					(p = p->next) != &start_mm->mmlist) {
				mm = list_entry(p, struct mm_struct, mmlist);
				if (!atomic_inc_not_zero(&mm->mm_users))
					continue;
				spin_unlock(&mmlist_lock);
				mmput(prev_mm);
				prev_mm = mm;

				cond_resched();

				swcount = *swap_map;
				if (swcount <= 1)
					;
				else if (mm == &init_mm) {
					set_start_mm = 1;
					shmem = shmem_unuse(entry, page);
				} else
					retval = unuse_mm(mm, entry, page);
				if (set_start_mm && *swap_map < swcount) {
					mmput(new_start_mm);
					atomic_inc(&mm->mm_users);
					new_start_mm = mm;
					set_start_mm = 0;
				}
				spin_lock(&mmlist_lock);
			}
			spin_unlock(&mmlist_lock);
			mmput(prev_mm);
			mmput(start_mm);
			start_mm = new_start_mm;
		}
		if (retval) {
			unlock_page(page);
			page_cache_release(page);
			break;
		}

		/*
		 * How could swap count reach 0x7fff when the maximum
		 * pid is 0x7fff, and there's no way to repeat a swap
		 * page within an mm (except in shmem, where it's the
		 * shared object which takes the reference count)?
		 * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
		 *
		 * If that's wrong, then we should worry more about
		 * exit_mmap() and do_munmap() cases described above:
		 * we might be resetting SWAP_MAP_MAX too early here.
		 * We know "Undead"s can happen, they're okay, so don't
		 * report them; but do report if we reset SWAP_MAP_MAX.
		 */
		if (*swap_map == SWAP_MAP_MAX) {
			spin_lock(&swap_lock);
			*swap_map = 1;
			spin_unlock(&swap_lock);
			reset_overflow = 1;
		}

		/*
		 * If a reference remains (rare), we would like to leave
		 * the page in the swap cache; but try_to_unmap could
		 * then re-duplicate the entry once we drop page lock,
		 * so we might loop indefinitely; also, that page could
		 * not be swapped out to other storage meanwhile.  So:
		 * delete from cache even if there's another reference,
		 * after ensuring that the data has been saved to disk -
		 * since if the reference remains (rarer), it will be
		 * read from disk into another page.  Splitting into two
		 * pages would be incorrect if swap supported "shared
		 * private" pages, but they are handled by tmpfs files.
		 *
		 * Note shmem_unuse already deleted a swappage from
		 * the swap cache, unless the move to filepage failed:
		 * in which case it left swappage in cache, lowered its
		 * swap count to pass quickly through the loops above,
		 * and now we must reincrement count to try again later.
		 */
		if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
			struct writeback_control wbc = {
				.sync_mode = WB_SYNC_NONE,
			};

			swap_writepage(page, &wbc);
			lock_page(page);
			wait_on_page_writeback(page);
		}
		if (PageSwapCache(page)) {
			if (shmem)
				swap_duplicate(entry);
			else
				delete_from_swap_cache(page);
		}

		/*
		 * So we could skip searching mms once swap count went
		 * to 1, we did not mark any present ptes as dirty: must
		 * mark page dirty so shrink_page_list will preserve it.
		 */
		SetPageDirty(page);
		unlock_page(page);
		page_cache_release(page);

		/*
		 * Make sure that we aren't completely killing
		 * interactive performance.
		 */
		cond_resched();
	}
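/*
 * Aside: both versions of try_to_unuse() above drive their main loop
 * with find_next_to_unuse().  The helper below is a simplified sketch
 * of the contract that loop relies on, not the kernel's exact code:
 * scan swap_map, starting just after 'prev' and wrapping around, for
 * the next slot that is still in use (count != 0) and not marked bad;
 * return 0 when none remains, which terminates the while loop.  The
 * real helper avoids the redundant rescan this naive version does
 * after wrapping.
 */
static unsigned int demo_find_next_to_unuse(struct swap_info_struct *si,
					    unsigned int prev)
{
	unsigned int i = prev;
	int wrapped = 0;
	unsigned int count;

	for (;;) {
		if (++i >= si->max) {
			if (wrapped)
				return 0;	/* whole map scanned: nothing left */
			wrapped = 1;
			i = 0;			/* wrap; slot 0 is never allocated */
			continue;
		}
		count = si->swap_map[i];
		if (count && count != SWAP_MAP_BAD)
			return i;		/* next entry still in use */
	}
}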
static int unswap_by_move(unsigned short *map, unsigned long max,
			  unsigned long start, unsigned long n_pages)
{
	struct task_struct *p;
	unsigned long entry, rover = (start == 1) ? n_pages+1 : 1;
	unsigned long i, j;

	DPRINTK( "unswapping %lu..%lu by moving in swap\n",
		 start, start+n_pages-1 );

	/* can free the allocated pages by moving them to other swap pages */
	for( i = start; i < start+n_pages; ++i ) {
		if (!map[i]) {
			map[i] = SWAP_MAP_BAD;
			DPRINTK( "unswap: page %lu was free\n", i );
			continue;
		}
		else if (map[i] == SWAP_MAP_BAD) {
			printk( KERN_ERR "get_stram_region: page %lu already "
				"reserved??\n", i );
		}
		DPRINTK( "unswap: page %lu is alloced, count=%u\n", i, map[i] );

		/* find a free page not in our region */
		for( j = rover; j != rover-1; j = (j == max-1) ? 1 : j+1 ) {
			if (j >= start && j < start+n_pages)
				continue;
			if (!map[j]) {
				rover = j+1;
				break;
			}
		}
		if (j == rover-1) {
			printk( KERN_ERR "get_stram_region: not enough free swap "
				"pages now??\n" );
			return( -ENOMEM );
		}
		DPRINTK( "unswap: map[i=%lu]=%u map[j=%lu]=%u nr_swap=%u\n",
			 i, map[i], j, map[j], nr_swap_pages );

		--nr_swap_pages;
		entry = SWP_ENTRY( stram_swap_type, j );
		if (stram_swap_info->lowest_bit == j)
			stram_swap_info->lowest_bit++;
		if (stram_swap_info->highest_bit == j)
			stram_swap_info->highest_bit--;

		memcpy( SWAP_ADDR(j), SWAP_ADDR(i), PAGE_SIZE );
#ifdef DO_PROC
		stat_swap_move++;
#endif

		while( map[i] ) {
			read_lock(&tasklist_lock);
			for_each_task(p) {
				if (unswap_process( p->mm, SWP_ENTRY( stram_swap_type, i ),
						    entry, 1 )) {
					read_unlock(&tasklist_lock);
					map[j]++;
					goto repeat;
				}
			}
			read_unlock(&tasklist_lock);
			if (map[i] && map[i] != SWAP_MAP_MAX) {
				printk( KERN_ERR "get_stram_region: ST-RAM swap page %lu "
					"not used by any process\n", i );
				/* quit while loop and overwrite bad map entry */
				break;
			}
			else if (!map[i]) {
				/* somebody else must have swapped in that page, so free the
				 * new one (we're moving to) */
				DPRINTK( "unswap: map[i] became 0, also clearing map[j]\n" );
				map[j] = 0;
			}
		  repeat:
			;	/* null statement: a label may not directly precede '}' */
		}

		DPRINTK( "unswap: map[i=%lu]=%u map[j=%lu]=%u nr_swap=%u\n",
			 i, map[i], j, map[j], nr_swap_pages );
		map[i] = SWAP_MAP_BAD;
		if (stram_swap_info->lowest_bit == i)
			stram_swap_info->lowest_bit++;
		if (stram_swap_info->highest_bit == i)
			stram_swap_info->highest_bit--;
		--nr_swap_pages;
	}

	return( 0 );
}
#endif

static int unswap_by_read(unsigned short *map, unsigned long max,
			  unsigned long start, unsigned long n_pages)
{
	struct task_struct *p;
	unsigned long entry, page;
	unsigned long i;
	struct page *page_map;

	DPRINTK( "unswapping %lu..%lu by reading in\n",
		 start, start+n_pages-1 );

	for( i = start; i < start+n_pages; ++i ) {
		if (map[i] == SWAP_MAP_BAD) {
			printk( KERN_ERR "get_stram_region: page %lu already "
				"reserved??\n", i );
			continue;
		}

		if (map[i]) {
			entry = SWP_ENTRY(stram_swap_type, i);
			DPRINTK("unswap: map[i=%lu]=%u nr_swap=%u\n",
				i, map[i], nr_swap_pages);

			/* Get a page for the entry, using the existing swap cache
			 * page if there is one.  Otherwise, get a clean page and
			 * read the swap into it. */
			page_map = read_swap_cache(entry);
			if (page_map) {
				page = page_address(page_map);
				read_lock(&tasklist_lock);
				for_each_task(p)
					unswap_process(p->mm, entry, page /* , 0 */);
				read_unlock(&tasklist_lock);
				shm_unuse(entry, page);
				/* Now get rid of the extra reference to the temporary
				 * page we've been using. */
				if (PageSwapCache(page_map))
					delete_from_swap_cache(page_map);
				__free_page(page_map);
#ifdef DO_PROC
				stat_swap_force++;
#endif
			}
			else if (map[i])
				return -ENOMEM;
		}

		DPRINTK( "unswap: map[i=%lu]=%u nr_swap=%u\n",
			 i, map[i], nr_swap_pages );
		map[i] = SWAP_MAP_BAD;
		if (stram_swap_info->lowest_bit == i)
			stram_swap_info->lowest_bit++;
		if (stram_swap_info->highest_bit == i)
			stram_swap_info->highest_bit--;
		--nr_swap_pages;
	}

	return 0;
}

/*
 * reserve a region in ST-RAM swap space for an allocation
 */
static void *get_stram_region( unsigned long n_pages )
{
	unsigned short *map = stram_swap_info->swap_map;
	unsigned long max = stram_swap_info->max;
	unsigned long start, total_free, region_free;
	int err;
	void *ret = NULL;

	DPRINTK( "get_stram_region(n_pages=%lu)\n", n_pages );

	down(&stram_swap_sem);

	/* disallow writing to the swap device now */
	stram_swap_info->flags = SWP_USED;

	/* find a region of n_pages pages in the swap space including as many
	 * free pages as possible (and excluding any already-reserved pages). */
	if (!(start = find_free_region( n_pages, &total_free, &region_free )))
		goto end;
	DPRINTK( "get_stram_region: region starts at %lu, has %lu free pages\n",
		 start, region_free );

#if 0
	err = ((total_free-region_free >= n_pages-region_free) ?
	       unswap_by_move( map, max, start, n_pages ) :
	       unswap_by_read( map, max, start, n_pages ));
#else
	err = unswap_by_read(map, max, start, n_pages);
#endif
	if (err)
		goto end;

	ret = SWAP_ADDR(start);
  end:
	/* allow using swap device again */
	stram_swap_info->flags = SWP_WRITEOK;
	up(&stram_swap_sem);
	DPRINTK( "get_stram_region: returning %p\n", ret );
	return( ret );
}