struct page * read_swap_cache_async(unsigned long entry, int wait)
{
	struct page *found_page = 0, *new_page;
	unsigned long new_page_addr;

#ifdef DEBUG_SWAP
	printk("DebugVM: read_swap_cache_async entry %08lx%s\n",
	       entry, wait ? ", wait" : "");
#endif
	/*
	 * Make sure the swap entry is still in use.
	 */
	if (!swap_duplicate(entry))	/* Account for the swap cache */
		goto out;
	/*
	 * Look for the page in the swap cache.
	 */
	found_page = lookup_swap_cache(entry);
	if (found_page)
		goto out_free_swap;

	new_page_addr = __get_free_page(GFP_USER);
	if (!new_page_addr)
		goto out_free_swap;	/* Out of memory */
	new_page = mem_map + MAP_NR(new_page_addr);

	/*
	 * Check the swap cache again, in case we stalled above.
	 */
	found_page = lookup_swap_cache(entry);
	if (found_page)
		goto out_free_page;
	/*
	 * Add it to the swap cache and read its contents.
	 */
	if (!add_to_swap_cache(new_page, entry))
		goto out_free_page;

	set_bit(PG_locked, &new_page->flags);
	rw_swap_page(READ, entry, (char *) new_page_addr, wait);
#ifdef DEBUG_SWAP
	printk("DebugVM: read_swap_cache_async created "
	       "entry %08lx at %p\n",
	       entry, (char *) page_address(new_page));
#endif
	return new_page;

out_free_page:
	__free_page(new_page);
out_free_swap:
	swap_free(entry);
out:
	return found_page;
}
struct page * read_swap_cache_async(swp_entry_t entry, int wait)
{
	struct page *found_page = 0, *new_page;
	unsigned long new_page_addr;

	/*
	 * Make sure the swap entry is still in use.
	 */
	if (!swap_duplicate(entry))	/* Account for the swap cache */
		goto out;
	/*
	 * Look for the page in the swap cache.
	 */
	found_page = lookup_swap_cache(entry);
	if (found_page)
		goto out_free_swap;

	new_page_addr = __get_free_page(GFP_USER);
	if (!new_page_addr)
		goto out_free_swap;	/* Out of memory */
	new_page = virt_to_page(new_page_addr);

	/*
	 * Check the swap cache again, in case we stalled above.
	 */
	found_page = lookup_swap_cache(entry);
	if (found_page)
		goto out_free_page;
	/*
	 * Add it to the swap cache and read its contents.
	 */
	lock_page(new_page);
	add_to_swap_cache(new_page, entry);
	rw_swap_page(READ, new_page, wait);
	return new_page;

out_free_page:
	page_cache_release(new_page);
out_free_swap:
	swap_free(entry);
out:
	return found_page;
}
static int do_swap_page(struct mm_struct * mm,
	struct vm_area_struct * vma, unsigned long address,
	pte_t * page_table, swp_entry_t entry, int write_access)
{
	struct page *page = lookup_swap_cache(entry);
	pte_t pte;

	if (!page) {
		lock_kernel();
		swapin_readahead(entry);
		page = read_swap_cache(entry);
		unlock_kernel();
		if (!page)
			return -1;

		flush_page_to_ram(page);
		flush_icache_page(vma, page);
	}

	mm->rss++;

	pte = mk_pte(page, vma->vm_page_prot);

	/*
	 * Freeze the "shared"ness of the page, ie page_count + swap_count.
	 * Must lock page before transferring our swap count to already
	 * obtained page count.
	 */
	lock_page(page);
	swap_free(entry);
	if (write_access && !is_page_shared(page))
		pte = pte_mkwrite(pte_mkdirty(pte));
	UnlockPage(page);

	set_pte(page_table, pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, pte);
	return 1;	/* Minor fault */
}
/*
 * We hold the mm semaphore and the page_table_lock on entry and
 * should release the pagetable lock on exit..
 */
static int do_swap_page(struct mm_struct * mm,
	struct vm_area_struct * vma, unsigned long address,
	pte_t * page_table, pte_t orig_pte, int write_access)
{
	struct page *page;
	swp_entry_t entry = pte_to_swp_entry(orig_pte);
	pte_t pte;
	int ret = 1;

	spin_unlock(&mm->page_table_lock);
	page = lookup_swap_cache(entry);
	if (!page) {
		swapin_readahead(entry);
		page = read_swap_cache_async(entry);
		if (!page) {
			/*
			 * Back out if somebody else faulted in this pte while
			 * we released the page table lock.
			 */
			int retval;
			spin_lock(&mm->page_table_lock);
			retval = pte_same(*page_table, orig_pte) ? -1 : 1;
			spin_unlock(&mm->page_table_lock);
			return retval;
		}

		/* Had to read the page from swap area: Major fault */
		ret = 2;
	}

	lock_page(page);

	/*
	 * Back out if somebody else faulted in this pte while we
	 * released the page table lock.
	 */
	spin_lock(&mm->page_table_lock);
	if (!pte_same(*page_table, orig_pte)) {
		spin_unlock(&mm->page_table_lock);
		unlock_page(page);
		page_cache_release(page);
		return 1;
	}

	/* The page isn't present yet, go ahead with the fault. */

	swap_free(entry);
	if (vm_swap_full())
		remove_exclusive_swap_page(page);

	mm->rss++;
	pte = mk_pte(page, vma->vm_page_prot);
	if (write_access && can_share_swap_page(page))
		pte = pte_mkdirty(pte_mkwrite(pte));
	unlock_page(page);

	flush_page_to_ram(page);
	flush_icache_page(vma, page);
	set_pte(page_table, pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, pte);
	spin_unlock(&mm->page_table_lock);
	return ret;
}