/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	swp_entry_t entry, struct page* page)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}
	pte = pte_offset(dir, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unuse_pte(vma, offset+address-vma->vm_start, pte, entry, page);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
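The offset/address bookkeeping above (repeated in the rmap-era variant below) splits the virtual address at the pmd boundary: the pmd-aligned upper bits are accumulated into offset, and address keeps only the remainder within the pmd, so offset+address reconstructs the full virtual address for each pte, and offset+address-vma->vm_start gives the position within the vma. A minimal standalone illustration of the split, using hypothetical PMD_SHIFT/PMD_SIZE values rather than any real architecture's asm/pgtable.h:

#include <stdio.h>

/* Hypothetical constants for illustration only; the kernel's real values
 * come from the per-architecture pagetable headers. */
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	unsigned long address = 0x40123456UL;
	unsigned long upper = address & PMD_MASK;	/* pmd-aligned base */
	unsigned long lower = address & ~PMD_MASK;	/* offset within the pmd */

	/* upper + lower reconstructs the original virtual address, which is
	 * what unuse_pmd's offset+address computes for each pte it visits. */
	printf("upper=%#lx lower=%#lx sum=%#lx\n", upper, lower, upper + lower);
	return 0;
}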
/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	swp_entry_t entry, struct page* page)
{
	struct pte_chain * pte_chain = NULL;
	pte_t *pte, *mapping;
	unsigned long end;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}
	mapping = pte = pte_offset_map(dir, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		/*
		 * FIXME: handle pte_chain_alloc() failures
		 */
		if (pte_chain == NULL)
			pte_chain = pte_chain_alloc(GFP_ATOMIC);
		unuse_pte(vma, offset+address-vma->vm_start, pte, entry,
				page, &pte_chain);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	pte_unmap(mapping);
	pte_chain_free(pte_chain);
}
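For context, both unuse_pmd variants above are invoked from a pgd-level walker one level up, which is where the offset parameter originates (each level peels off its own aligned upper bits and passes them down). A sketch of that caller's 2.4-era shape, reconstructed from the standard three-level walk idiom of the period; details may differ between releases:

static inline void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
	unsigned long address, unsigned long size,
	swp_entry_t entry, struct page* page)
{
	pmd_t * pmd;
	unsigned long offset, end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	offset = address & PGDIR_MASK;	/* pgd-aligned base, handed down */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unuse_pmd(vma, pmd, address, end - address, offset, entry,
			  page);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
}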
static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pte_t swp_pte = swp_entry_to_pte(entry);
	pte_t *pte;
	int ret = 0;

	/*
	 * We don't actually need pte lock while scanning for swp_pte: since
	 * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
	 * page table while we're scanning; though it could get zapped, and on
	 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
	 * of unmatched parts which look like swp_pte, so unuse_pte must
	 * recheck under pte lock.  Scanning without pte lock lets it be
	 * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
	 */
	pte = pte_offset_map(pmd, addr);
	do {
		/*
		 * swapoff spends a _lot_ of time in this loop!
		 * Test inline before going to call unuse_pte.
		 */
		if (unlikely(pte_same(*pte, swp_pte))) {
			pte_unmap(pte);
			ret = unuse_pte(vma, pmd, addr, entry, page);
			if (ret)
				goto out;
			pte = pte_offset_map(pmd, addr);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
out:
	return ret;
}
static inline int unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	unsigned int type, unsigned long page)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*dir))
		return 0;
	if (pmd_bad(*dir)) {
		printk("unuse_pmd: bad pmd (%08lx)\n", pmd_val(*dir));
		pmd_clear(dir);
		return 0;
	}
	pte = pte_offset(dir, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (unuse_pte(vma, offset+address-vma->vm_start, pte, type,
			      page))
			return 1;
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
	return 0;
}
static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pte_t swp_pte = swp_entry_to_pte(entry);
	pte_t *pte;
	spinlock_t *ptl;
	int found = 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		/*
		 * swapoff spends a _lot_ of time in this loop!
		 * Test inline before going to call unuse_pte.
		 */
		if (unlikely(pte_same(*pte, swp_pte))) {
			unuse_pte(vma, pte++, addr, entry, page);
			found = 1;
			break;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	return found;
}
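Both unuse_pte_range variants are in turn driven by a pmd-level walker in the post-2.6.11 four-level pagetable style. A sketch of its shape, reconstructed from the standard pmd_addr_end walk idiom; the exact return handling differs between the two eras shown (the found-flag version returns 1 on a hit, while the later version propagates an error code from unuse_pte):

static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				swp_entry_t entry, struct page *page)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (unuse_pte_range(vma, pmd, addr, next, entry, page))
			return 1;
	} while (pmd++, addr = next, addr != end);
	return 0;
}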