void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	pte_t *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = (pmd_t *)pgd;
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pte_offset(pmd, 0);
	pmd_clear(pmd);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}
void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
	unsigned long flags;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd + pgd_index(fcse_va_to_mva(mm, 0)), 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	pmd_free(mm, pmd);
free:
	pgd_list_lock(flags);
	pgd_list_del(pgd);
	pgd_list_unlock(flags);
	free_pages((unsigned long) pgd, 2);
}
void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	pmd_free(mm, pmd);
free:
	free_pages((unsigned long) pgd, 2);
}
static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	if (kvm_pmd_huge(*pmd)) {
		pmd_clear(pmd);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	} else {
		pte_t *pte_table = pte_offset_kernel(pmd, 0);
		pmd_clear(pmd);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
		pte_free_kernel(NULL, pte_table);
	}
	put_page(virt_to_page(pmd));
}
/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static inline void free_one_pmd(pmd_t * dir)
{
	pte_t * pte;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		printk("free_one_pmd: bad directory entry %08lx\n", pmd_val(*dir));
		pmd_clear(dir);
		return;
	}
	pte = pte_offset(dir, 0);
	pmd_clear(dir);
	pte_free(pte);
}
/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static inline void free_one_pmd(pmd_t * dir)
{
	pte_t * pte;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}
	pte = pte_offset(dir, 0);
	pmd_clear(dir);
	pte_free(pte);
}
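/*
 * For context: a helper like free_one_pmd() above is normally driven from a
 * pgd-level walker that clears the pgd entry and then releases each pmd slot.
 * The sketch below shows such a caller in the same 2.4-era style; treat it as
 * an illustrative sketch of the pattern, not a verbatim quote of the upstream
 * mm/memory.c code.
 */
static inline void free_one_pgd(pgd_t * dir)
{
	int j;
	pmd_t * pmd;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	/* Detach the pmd table from the pgd, then free each slot's pte page. */
	pmd = pmd_offset(dir, 0);
	pgd_clear(dir);
	for (j = 0; j < PTRS_PER_PMD; j++)
		free_one_pmd(pmd + j);
	pmd_free(pmd);
}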
static inline int unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	unsigned int type, unsigned long page)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*dir))
		return 0;
	if (pmd_bad(*dir)) {
		printk("unuse_pmd: bad pmd (%08lx)\n", pmd_val(*dir));
		pmd_clear(dir);
		return 0;
	}
	pte = pte_offset(dir, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (unuse_pte(vma, offset+address-vma->vm_start, pte, type, page))
			return 1;
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
	return 0;
}
static inline void unswap_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	unsigned long entry, unsigned long page /* , int isswap */)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		printk("unswap_pmd: bad pmd (%08lx)\n", pmd_val(*dir));
		pmd_clear(dir);
		return;
	}
	pte = pte_offset(dir, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unswap_pte(vma, offset+address-vma->vm_start, pte, entry, page /* , isswap */);
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto end;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		goto end;
	}

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd))
		goto end;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto end;
	}

	pte = pte_offset(pmd, addr);
	if (pte_none(*pte))
		pte = NULL;
end:
	return pte;
}
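/*
 * get_one_pte() above comes from the mremap path, where it is paired with an
 * allocation helper for the destination address and a caller that moves the
 * entry under the page table lock. The sketch below illustrates that pairing;
 * alloc_one_pte(), copy_one_pte() and move_one_page() are assumed companion
 * helpers modelled on the 2.4-era mm/mremap.c, not verbatim upstream code.
 */
static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pmd_t * pmd;
	pte_t * pte = NULL;

	/* Allocate (or find) the pmd and pte levels for the destination. */
	pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
	if (pmd)
		pte = pte_alloc(mm, pmd, addr);
	return pte;
}

static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
	int error = 0;
	pte_t *src;

	spin_lock(&mm->page_table_lock);
	src = get_one_pte(mm, old_addr);
	if (src)
		error = copy_one_pte(mm, src, alloc_one_pte(mm, new_addr));
	spin_unlock(&mm->page_table_lock);
	return error;
}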
/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	swp_entry_t entry, struct page* page)
{
	struct pte_chain * pte_chain = NULL;
	pte_t *pte, *mapping;
	unsigned long end;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}
	mapping = pte = pte_offset_map(dir, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		/*
		 * FIXME: handle pte_chain_alloc() failures
		 */
		if (pte_chain == NULL)
			pte_chain = pte_chain_alloc(GFP_ATOMIC);
		unuse_pte(vma, offset+address-vma->vm_start, pte, entry, page, &pte_chain);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	pte_unmap(mapping);
	pte_chain_free(pte_chain);
}
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 4MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}
/*
 * This function zeroes out partial mmap'ed pages at truncation time..
 */
static void partial_clear(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t *page_table, pte;

	page_dir = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*page_dir))
		return;
	if (pgd_bad(*page_dir)) {
		printk("bad page table directory entry %p:[%lx]\n", page_dir, pgd_val(*page_dir));
		pgd_clear(page_dir);
		return;
	}
	page_middle = pmd_offset(page_dir, address);
	if (pmd_none(*page_middle))
		return;
	if (pmd_bad(*page_middle)) {
		/* report the offending pmd entry, not the pgd one */
		printk("bad page table middle entry %p:[%lx]\n", page_middle, pmd_val(*page_middle));
		pmd_clear(page_middle);
		return;
	}
	page_table = pte_offset(page_middle, address);
	pte = *page_table;
	if (!pte_present(pte))
		return;
	flush_cache_page(vma, address);
	address &= ~PAGE_MASK;
	address += pte_page(pte);
	if (address >= high_memory)
		return;
	memset((void *) address, 0, PAGE_SIZE - (address & ~PAGE_MASK));
	flush_page_to_ram(pte_page(pte));
}
static inline void zap_pte_range(pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("zap_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	if (address + size > PMD_SIZE)
		size = PMD_SIZE - address;
	size >>= PAGE_SHIFT;
	for (;;) {
		pte_t page;
		if (!size)
			break;
		page = *pte;
		pte++;
		size--;
		if (pte_none(page))
			continue;
		pte_clear(pte-1);
		free_pte(page);
	}
}
static inline int copy_pte_range(pmd_t *dst_pmd, pmd_t *src_pmd,
	unsigned long address, unsigned long size, int cow)
{
	pte_t * src_pte, * dst_pte;
	unsigned long end;

	if (pmd_none(*src_pmd))
		return 0;
	if (pmd_bad(*src_pmd)) {
		printk("copy_pte_range: bad pmd (%08lx)\n", pmd_val(*src_pmd));
		pmd_clear(src_pmd);
		return 0;
	}
	src_pte = pte_offset(src_pmd, address);
	if (pmd_none(*dst_pmd)) {
		if (!pte_alloc(dst_pmd, 0))
			return -ENOMEM;
	}
	dst_pte = pte_offset(dst_pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end >= PMD_SIZE)
		end = PMD_SIZE;
	do {
		/* I would like to switch arguments here, to make it
		 * consistent with copy_xxx_range and memcpy syntax.
		 */
		copy_one_pte(src_pte++, dst_pte++, cow);
		address += PAGE_SIZE;
	} while (address < end);
	return 0;
}
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
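/*
 * Both unmap_area_sections() variants above rely on the kvm_seq counter to
 * tell other mms that the kernel section mappings changed: init_mm is updated
 * here, and a stale mm resynchronises its vmalloc/ioremap pgd entries when it
 * notices the sequence number moved on. A sketch of that resync step, modelled
 * on the ARM __check_kvm_seq() helper (details assumed, not quoted verbatim):
 */
static void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		/*
		 * Snapshot the global sequence, copy the kernel's vmalloc
		 * region pgd entries into this mm, then retry if the global
		 * sequence changed while we were copying.
		 */
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}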
static void teardown_huge_pte(hugepte_t *ptep)
{
	int i;

	for (i = 0; i < HUGEPTE_BATCH_SIZE; i++)
		pmd_clear((pmd_t *)(ptep+i));
}
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		printk("free_area_pte: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	while (address < end) {
		pte_t page = *pte;
		pte_clear(pte);
		address += PAGE_SIZE;
		pte++;
		if (pte_none(page))
			continue;
		if (pte_present(page)) {
			free_page(pte_page(page));
			continue;
		}
		printk("Whee.. Swapped out page in kernel page table\n");
	}
}
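/*
 * free_area_pte() above is the leaf step of the old vmalloc teardown; the pmd
 * level walks each pmd slot within one pgd entry and hands the byte range
 * down. A sketch of that caller in the same style (the name free_area_pmd and
 * the exact bounds arithmetic are assumptions modelled on that era's
 * mm/vmalloc.c, not a verbatim quote):
 */
static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		printk("free_area_pmd: bad pgd (%08lx)\n", pgd_val(*dir));
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	while (address < end) {
		/* Clear one pmd's worth of ptes, then advance to the next pmd. */
		free_area_pte(pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	}
}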
static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma,
	pmd_t *dir, unsigned long address, unsigned long end, int gfp_mask)
{
	pte_t * pte;
	unsigned long pmd_end;

	if (pmd_none(*dir))
		return 0;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return 0;
	}

	pte = pte_offset(dir, address);

	pmd_end = (address + PMD_SIZE) & PMD_MASK;
	if (end > pmd_end)
		end = pmd_end;

	do {
		int result;
		mm->swap_address = address + PAGE_SIZE;
		result = try_to_swap_out(mm, vma, address, pte, gfp_mask);
		if (result)
			return result;
		if (!mm->swap_cnt)
			return 0;
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	return 0;
}
static unsigned long get_phys_addr(struct task_struct * p, unsigned long ptr)
{
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t pte;

	if (!p || !p->mm || ptr >= TASK_SIZE)
		return 0;
	page_dir = pgd_offset(p->mm,ptr);
	if (pgd_none(*page_dir))
		return 0;
	if (pgd_bad(*page_dir)) {
		printk("bad page directory entry %08lx\n", pgd_val(*page_dir));
		pgd_clear(page_dir);
		return 0;
	}
	page_middle = pmd_offset(page_dir,ptr);
	if (pmd_none(*page_middle))
		return 0;
	if (pmd_bad(*page_middle)) {
		printk("bad page middle entry %08lx\n", pmd_val(*page_middle));
		pmd_clear(page_middle);
		return 0;
	}
	pte = *pte_offset(page_middle,ptr);
	if (!pte_present(pte))
		return 0;
	return pte_page(pte) + (ptr & ~PAGE_MASK);
}
static inline void remove_mapping_pte_range (pmd_t *pmd, unsigned long address, unsigned long size)
{
	pte_t *pte;
	unsigned long end;

	if (pmd_none (*pmd))
		return;
	if (pmd_bad (*pmd)){
		printk ("remove_graphics_pte_range: bad pmd (%08lx)\n", pmd_val (*pmd));
		pmd_clear (pmd);
		return;
	}
	pte = pte_offset (pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry = *pte;
		if (pte_present (entry))
			set_pte (pte, pte_modify (entry, PAGE_NONE));
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
static inline void iterate_pte(pmd_t * pmd, unsigned long address,
	unsigned long size, pte_iterator_t op, unsigned long arg)
{
	pte_t *pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		op(pte, arg);
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
static inline int zap_pte_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	int freed;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	if (address + size > PMD_SIZE)
		size = PMD_SIZE - address;
	size >>= PAGE_SHIFT;
	freed = 0;
	for (;;) {
		pte_t page;
		if (!size)
			break;
		page = ptep_get_and_clear(pte);
		pte++;
		size--;
		if (pte_none(page))
			continue;
		freed += free_pte(page);
	}
	return freed;
}
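/*
 * The freed-page count returned by zap_pte_range() above is accumulated one
 * level up. The sketch below shows a pgd-level caller in the matching 2.4
 * style; zap_pmd_range() and its bounds arithmetic are assumptions modelled on
 * that era's mm/memory.c, not a verbatim quote.
 */
static inline int zap_pmd_range(struct mm_struct *mm, pgd_t * dir, unsigned long address, unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	int freed;

	if (pgd_none(*dir))
		return 0;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return 0;
	}
	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	freed = 0;
	do {
		/* Each pte-level call clears at most one pmd's worth of entries. */
		freed += zap_pte_range(mm, pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return freed;
}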
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pmd = pmd_offset(pgd, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
no_pmd:
	pgd_clear(pgd);
	pmd_free(mm, pmd);
no_pgd:
	free_pages((unsigned long) pgd_base, 2);
}
/* mmlist_lock and vma->vm_mm->page_table_lock are held */
static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
	unsigned long address, unsigned long size, unsigned long offset,
	swp_entry_t entry, struct page* page)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*dir))
		return;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return;
	}
	pte = pte_offset(dir, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unuse_pte(vma, offset+address-vma->vm_start, pte, entry, page);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto end;
	if (pgd_bad(*pgd)) {
		printk("move_one_page: bad source pgd (%08lx)\n", pgd_val(*pgd));
		pgd_clear(pgd);
		goto end;
	}

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd))
		goto end;
	if (pmd_bad(*pmd)) {
		printk("move_one_page: bad source pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		goto end;
	}

	pte = pte_offset(pmd, addr);
	if (pte_none(*pte))
		pte = NULL;
end:
	return pte;
}
static int filemap_sync_pte_range(pmd_t * pmd,
	unsigned long address, unsigned long end,
	struct vm_area_struct *vma, unsigned int flags)
{
	pte_t *pte;
	int error;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset_map(pmd, address);
	if ((address & PMD_MASK) != (end & PMD_MASK))
		end = (address & PMD_MASK) + PMD_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte(pte, vma, address, flags);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));

	pte_unmap(pte - 1);

	return error;
}
static void unmap_pmds(struct kvm *kvm, pud_t *pud,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pmd_t *pmd, *start_pmd;

	start_pmd = pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				pmd_t old_pmd = *pmd;

				pmd_clear(pmd);
				kvm_tlb_flush_vmid_ipa(kvm, addr);

				kvm_flush_dcache_pmd(old_pmd);

				put_page(virt_to_page(pmd));
			} else {
				unmap_ptes(kvm, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);

	if (kvm_pmd_table_empty(kvm, start_pmd))
		clear_pud_entry(kvm, pud, start_addr);
}
static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	pte_t *pte;
	unsigned long end = ~0ul;
	int i_min = __pmd_offset(start_gpa);
	int i_max = __pmd_offset(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
		if (!pmd_present(pmd[i]))
			continue;

		pte = pte_offset(pmd + i, 0);
		if (i == i_max)
			end = end_gpa;

		if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) {
			pmd_clear(pmd + i);
			pte_free_kernel(NULL, pte);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
#if defined(CONFIG_SYNO_ARMADA_ARCH)
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		pud = pud_offset(pgd, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		pgd_clear(pgd);
		pud_free(mm, pud);
	}
#endif
	__pgd_free(pgd_base);
#elif defined(CONFIG_SYNO_COMCERTO)
	free_pages((unsigned long) pgd_base, get_order(16384));
#else
	free_pages((unsigned long) pgd_base, 2);
#endif
}
static int mem_write(struct inode * inode, struct file * file, char * buf, int count)
{
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t pte;
	char * page;
	struct task_struct * tsk;
	unsigned long addr;
	char *tmp;
	int i;

	if (count < 0)
		return -EINVAL;
	addr = file->f_pos;
	tsk = get_task(inode->i_ino >> 16);
	if (!tsk)
		return -ESRCH;
	tmp = buf;
	while (count > 0) {
		if (current->signal & ~current->blocked)
			break;
		page_dir = pgd_offset(tsk,addr);
		if (pgd_none(*page_dir))
			break;
		if (pgd_bad(*page_dir)) {
			printk("Bad page dir entry %08lx\n", pgd_val(*page_dir));
			pgd_clear(page_dir);
			break;
		}
		page_middle = pmd_offset(page_dir,addr);
		if (pmd_none(*page_middle))
			break;
		if (pmd_bad(*page_middle)) {
			printk("Bad page middle entry %08lx\n", pmd_val(*page_middle));
			pmd_clear(page_middle);
			break;
		}
		pte = *pte_offset(page_middle,addr);
		if (!pte_present(pte))
			break;
		if (!pte_write(pte))
			break;
		page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
		i = PAGE_SIZE-(addr & ~PAGE_MASK);
		if (i > count)
			i = count;
		memcpy_fromfs(page, tmp, i);
		addr += i;
		tmp += i;
		count -= i;
	}
	file->f_pos = addr;
	if (tmp != buf)
		return tmp-buf;
	if (current->signal & ~current->blocked)
		return -ERESTARTSYS;
	return 0;
}