/*
 * Copy the PTEs covered by one PMD entry from a source page-table tree
 * into a destination tree (fork-style duplication), one page at a time.
 *
 * @dst_pmd: destination PMD slot; a PTE table is allocated for it if absent
 * @src_pmd: source PMD slot to copy from
 * @address: virtual address where copying starts (offset within this PMD)
 * @size:    number of bytes to copy (clamped so we never run past the PMD)
 * @cow:     copy-on-write flag, passed straight through to copy_one_pte()
 *
 * Returns 0 on success (including the trivial "nothing to copy" cases),
 * or -ENOMEM if the destination PTE table could not be allocated.
 */
static inline int copy_pte_range(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long address, unsigned long size, int cow)
{
	pte_t * src_pte, * dst_pte;
	unsigned long end;

	/* Nothing mapped at the source: nothing to copy. */
	if (pmd_none(*src_pmd))
		return 0;
	/* Corrupt source entry: report it and clear it rather than walk it. */
	if (pmd_bad(*src_pmd)) {
		printk("copy_pte_range: bad pmd (%08lx)\n", pmd_val(*src_pmd));
		pmd_clear(src_pmd);
		return 0;
	}
	src_pte = pte_offset(src_pmd, address);
	/*
	 * Make sure the destination has a PTE table to copy into.
	 * NOTE(review): pte_alloc(dst_pmd, 0) presumably installs a fresh
	 * table into *dst_pmd so the pte_offset() below finds it — confirm
	 * against this era's pte_alloc() contract.
	 */
	if (pmd_none(*dst_pmd)) {
		if (!pte_alloc(dst_pmd, 0))
			return -ENOMEM;
	}
	dst_pte = pte_offset(dst_pmd, address);
	/* Reduce address to its offset within the PMD and clamp the end so
	 * the loop never walks past this PTE table. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end >= PMD_SIZE)
		end = PMD_SIZE;
	/* NOTE(review): a do/while copies at least one PTE even if size == 0 —
	 * presumably callers never pass a zero size; verify at call sites. */
	do {
		/* I would like to switch arguments here, to make it
		 * consistent with copy_xxx_range and memcpy syntax.
		 */
		copy_one_pte(src_pte++, dst_pte++, cow);
		address += PAGE_SIZE;
	} while (address < end);
	return 0;
}
/*
 * Move a single page mapping from old_addr to new_addr within @mm
 * (mremap-style relocation): look up the source PTE, allocate a slot
 * at the destination, and let copy_one_pte() transfer the entry.
 *
 * Returns 0 on success or if there was no mapping at old_addr;
 * otherwise whatever error copy_one_pte() reports.
 *
 * NOTE(review): no lock is taken here, and alloc_one_pte() runs after
 * src was looked up — if alloc_one_pte() can block or rebuild page
 * tables, src may be stale by the time copy_one_pte() uses it. Verify
 * what serialization the caller provides.
 *
 * NOTE(review): alloc_one_pte() may fail; this assumes copy_one_pte()
 * tolerates a NULL destination — confirm against its definition.
 */
static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
	int error = 0;
	pte_t * src;

	src = get_one_pte(mm, old_addr);
	if (src)
		error = copy_one_pte(src, alloc_one_pte(mm, new_addr));
	return error;
}
/*
 * Move a single page mapping from old_addr to new_addr within @mm,
 * serialized by mm->page_table_lock.
 *
 * Returns 0 on success or if there was no mapping at old_addr;
 * otherwise whatever error copy_one_pte() reports.
 */
static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
	int error = 0;
	pte_t * src, * dst;

	spin_lock(&mm->page_table_lock);
	/* First probe: only bother allocating a destination slot if the
	 * source address is actually mapped. */
	src = get_one_pte(mm, old_addr);
	if (src) {
		dst = alloc_one_pte(mm, new_addr);
		/*
		 * Re-fetch the source PTE after the allocation — presumably
		 * because alloc_one_pte() may drop page_table_lock while
		 * allocating, letting the old mapping change or vanish
		 * underneath us. TODO(review): confirm alloc_one_pte()'s
		 * locking behavior; the re-lookup is pointless otherwise.
		 */
		src = get_one_pte(mm, old_addr);
		if (src)
			error = copy_one_pte(mm, src, dst);
	}
	spin_unlock(&mm->page_table_lock);
	return error;
}
/*
 * Move a single page mapping from old_addr to new_addr within @vma's mm,
 * updating reverse-map bookkeeping via a pte_chain.
 *
 * The pte_chain is allocated with GFP_KERNEL *before* taking
 * mm->page_table_lock, since we must not sleep under the spinlock.
 * Source PTEs are mapped with the "nested" atomic-kmap variant so the
 * source and destination tables can be mapped simultaneously.
 *
 * Returns 0 on success or if there was no mapping at old_addr;
 * -1 if the pte_chain allocation failed; otherwise whatever error
 * copy_one_pte() reports.
 */
static int move_one_page(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct pte_chain * pte_chain;
	int error = 0;
	pte_t *src, *dst;

	pte_chain = pte_chain_alloc(GFP_KERNEL);
	if (!pte_chain)
		return -1;
	spin_lock(&mm->page_table_lock);
	src = get_one_pte_map_nested(mm, old_addr);
	if (src) {
		/*
		 * Look to see whether alloc_one_pte_map needs to perform a
		 * memory allocation. If it does then we need to drop the
		 * atomic kmap
		 */
		if (!page_table_present(mm, new_addr)) {
			pte_unmap_nested(src);
			src = NULL;
		}
		dst = alloc_one_pte_map(mm, new_addr);
		/* Re-establish the nested mapping (and re-check the source
		 * PTE) if we had to drop it for the allocation above. */
		if (src == NULL)
			src = get_one_pte_map_nested(mm, old_addr);
		if (src) {
			error = copy_one_pte(vma, src, dst, old_addr, new_addr, &pte_chain);
			pte_unmap_nested(src);
		}
		/* NOTE(review): dst is unmapped unconditionally here — this
		 * assumes pte_unmap() (and copy_one_pte()) tolerate a NULL
		 * dst if alloc_one_pte_map() failed; confirm. */
		pte_unmap(dst);
	}
	/* The old translation is gone (or was never there); shoot it down. */
	flush_tlb_page(vma, old_addr);
	spin_unlock(&mm->page_table_lock);
	/* Frees the chain only if copy_one_pte() didn't consume it. */
	pte_chain_free(pte_chain);
	return error;
}