/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	swp_entry_t entry, struct page* page)
{
	struct pte_chain *pte_chain = NULL;
	pte_t *pte, *mapping;
	unsigned long end;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}
	/* Keep the base pte mapping so it can be unmapped after the loop. */
	mapping = pte = pte_offset_map(dir, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		/*
		 * FIXME: handle pte_chain_alloc() failures
		 * (GFP_ATOMIC because page_table_lock is held)
		 */
		if (pte_chain == NULL)
			pte_chain = pte_chain_alloc(GFP_ATOMIC);
		unuse_pte(vma, offset + address - vma->vm_start,
				pte, entry, page, &pte_chain);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	pte_unmap(mapping);
	pte_chain_free(pte_chain);
}
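/*
 * For context, a minimal sketch (not necessarily the kernel's exact
 * code) of the level above: a pgd walker that drives unuse_pmd()
 * across the pmds covered by one pgd entry.  It assumes the usual
 * 2.5-era helpers (pgd_none/pgd_bad, pmd_offset, PGDIR_MASK,
 * PGDIR_SIZE, PMD_SIZE) and that the same locks are held as for
 * unuse_pmd() above.
 */
static inline void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
	unsigned long address, unsigned long size,
	swp_entry_t entry, struct page* page)
{
	pmd_t *pmd;
	unsigned long offset, end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	offset = address & PGDIR_MASK;
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		/* each step covers at most PMD_SIZE bytes of the range */
		unuse_pmd(vma, pmd, address, end - address, offset,
				entry, page);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
}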
static int move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
		unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct pte_chain *pte_chain;
	int error = 0;
	pte_t *src, *dst;

	pte_chain = pte_chain_alloc(GFP_KERNEL);
	if (!pte_chain)
		return -ENOMEM;
	spin_lock(&mm->page_table_lock);
	src = get_one_pte_map_nested(mm, old_addr);
	if (src) {
		/*
		 * Look to see whether alloc_one_pte_map needs to perform a
		 * memory allocation.  If it does then we need to drop the
		 * atomic kmap
		 */
		if (!page_table_present(mm, new_addr)) {
			pte_unmap_nested(src);
			src = NULL;
		}
		dst = alloc_one_pte_map(mm, new_addr);
		/*
		 * The dst page table now exists; if we dropped the source
		 * kmap above, re-take it before copying.
		 */
		if (src == NULL)
			src = get_one_pte_map_nested(mm, old_addr);
		if (src) {
			error = copy_one_pte(vma, src, dst, old_addr,
					new_addr, &pte_chain);
			pte_unmap_nested(src);
		}
		pte_unmap(dst);
	}
	flush_tlb_page(vma, old_addr);
	spin_unlock(&mm->page_table_lock);
	pte_chain_free(pte_chain);
	return error;
}
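/*
 * For context, a simplified sketch (error recovery omitted, hence the
 * _sketch suffix on this hypothetical name) of how a caller in the
 * style of the 2.5-era move_page_tables() might drive move_one_page()
 * over a whole range, one page at a time, walking backwards from the
 * end of the region.
 */
static int move_page_tables_sketch(struct vm_area_struct *vma,
	unsigned long new_addr, unsigned long old_addr, unsigned long len)
{
	unsigned long offset = len;

	flush_cache_range(vma, old_addr, old_addr + len);

	/*
	 * Move each page individually; on the first failure, a real
	 * implementation would move the already-copied ptes back
	 * rather than just bailing out.
	 */
	while (offset) {
		offset -= PAGE_SIZE;
		if (move_one_page(vma, old_addr + offset,
					new_addr + offset))
			return -1;
	}
	return 0;
}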