/* * Dump out the page tables associated with 'addr' in mm 'mm'. */ void show_pte(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; if (!mm) mm = &init_mm; pr_alert("pgd = %p\n", mm->pgd); pgd = pgd_offset(mm, addr); pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd)); do { pud_t *pud; pmd_t *pmd; pte_t *pte; if (pgd_none_or_clear_bad(pgd)) break; pud = pud_offset(pgd, addr); if (pud_none_or_clear_bad(pud)) break; pmd = pmd_offset(pud, addr); printk(", *pmd=%016llx", pmd_val(*pmd)); if (pmd_none_or_clear_bad(pmd)) break; pte = pte_offset_map(pmd, addr); printk(", *pte=%016llx", pte_val(*pte)); pte_unmap(pte); } while(0); printk("\n"); }
/*
 * Walk the pmd entries covering [addr, end), invoking the caller's
 * pmd_entry/pte_entry callbacks and reporting holes via pte_hole.
 * Returns 0 on success or the first non-zero callback result.
 */
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmdp = pmd_offset(pud, addr);
	unsigned long boundary;
	int ret = 0;

	do {
		boundary = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmdp)) {
			/* Hole in the page tables: let the caller know. */
			if (walk->pte_hole)
				ret = walk->pte_hole(addr, boundary, walk);
			if (ret)
				break;
			continue;
		}
		if (walk->pmd_entry)
			ret = walk->pmd_entry(pmdp, addr, boundary, walk);
		if (!ret && walk->pte_entry)
			ret = walk_pte_range(pmdp, addr, boundary, walk);
		if (ret)
			break;
	} while (pmdp++, addr = boundary, addr != end);

	return ret;
}
/*
 * Free a first-level page table, tearing down whatever lower-level
 * tables are still attached to the entry mapping address 0, then
 * release the pgd pages themselves (an order-2 allocation).
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pgtable_t ptep;

	if (!pgd_base)
		return;

	pgdp = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgdp))
		goto free_pgd;

	pmdp = pmd_offset(pgdp, 0);
	if (pmd_none_or_clear_bad(pmdp))
		goto free_pmd;

	/* Detach and free the pte page hanging off the pmd. */
	ptep = pmd_pgtable(*pmdp);
	pmd_clear(pmdp);
	pte_free(mm, ptep);
free_pmd:
	/* Detach and free the pmd page hanging off the pgd. */
	pgd_clear(pgdp);
	pmd_free(mm, pmdp);
free_pgd:
	free_pages((unsigned long) pgd_base, 2);
}
/*
 * Resolve a fault in the vmalloc/kernel region by walking the kernel
 * page tables for 'address'.  If a present pte with at least the
 * required 'protection_flags' is found, refill the TLB from it.
 *
 * Returns 1 when the fault was handled via a TLB refill, 0 when no
 * usable translation exists (caller must treat it as a real fault).
 */
static int handle_vmalloc_fault(struct mm_struct *mm,
				unsigned long protection_flags,
				unsigned long long textaccess,
				unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	/*
	 * BUGFIX: this was declared 'static pte_t *pte;', which shared a
	 * single pointer across all invocations of the fault handler —
	 * unsafe if faults can nest or run concurrently, and needless
	 * anyway.  Use an ordinary automatic variable.
	 */
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset_k(address);

	pud = pud_offset(dir, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry))
		return 0;

	/* The mapping must grant every requested protection bit. */
	if ((pte_val(entry) & protection_flags) != protection_flags)
		return 0;

	__do_tlb_refill(address, textaccess, pte);
	return 1;
}
/*
 * Free a first-level page table.  Lower-level tables still attached to
 * the pgd entry covering address 0 are torn down first, then the pgd
 * storage itself is released.  The release path is configuration
 * dependent (Synology Armada/LPAE, Comcerto, or generic ARM order-2
 * pages), selected by the preprocessor below.
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	/* Detach and free the pte page, then unwind pmd and pud in turn. */
	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
#if defined(CONFIG_SYNO_ARMADA_ARCH)
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
		/* Entries shared with the swapper pgd are not ours to free. */
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		pud = pud_offset(pgd, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		pgd_clear(pgd);
		pud_free(mm, pud);
	}
#endif
	__pgd_free(pgd_base);
#elif defined(CONFIG_SYNO_COMCERTO)
	/* Comcerto uses a 16KB pgd allocation. */
	free_pages((unsigned long) pgd_base, get_order(16384));
#else
	/* Generic ARM: pgd occupies four pages (order 2). */
	free_pages((unsigned long) pgd_base, 2);
#endif
}
/*
 * Free a first-level page table.  Lower-level tables still attached to
 * the pgd entry covering address 0 are torn down first (updating the
 * mm's pte/pmd accounting as they go); with LPAE any remaining
 * modules/pkmap or identity pmd tables are freed as well, and finally
 * the pgd storage itself is released via __pgd_free().
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	pud = pud_offset(pgd, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	/* Detach/free the pte page and keep the mm's counters in sync. */
	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	mm_dec_nr_ptes(mm);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
		/* Entries shared with the swapper pgd are not ours to free. */
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		pud = pud_offset(pgd, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
		pgd_clear(pgd);
		pud_free(mm, pud);
	}
#endif
	__pgd_free(pgd_base);
}
/*
 * Tear down the pte mappings under every populated pmd in [addr, end).
 * Empty or bad pmd entries are skipped.
 */
static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmdp = pmd_offset(pud, addr);

	do {
		unsigned long next = pmd_addr_end(addr, end);

		if (!pmd_none_or_clear_bad(pmdp))
			vunmap_pte_range(pmdp, addr, next);
		pmdp++;
		addr = next;
	} while (addr != end);
}
/*
 * Scan every populated pmd in [addr, end), checking the ptes beneath it
 * against the allowed node mask.  Returns 0 if all pages conform,
 * -EIO as soon as one does not.
 */
static inline int check_pmd_range(struct mm_struct *mm, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  unsigned long *nodes)
{
	pmd_t *pmdp = pmd_offset(pud, addr);

	do {
		unsigned long next = pmd_addr_end(addr, end);

		if (!pmd_none_or_clear_bad(pmdp) &&
		    check_pte_range(mm, pmdp, addr, next, nodes))
			return -EIO;
		pmdp++;
		addr = next;
	} while (addr != end);

	return 0;
}
/*
 * Sync the ptes under every populated pmd in [addr, end) and return
 * the accumulated page count reported by msync_pte_range().
 */
static inline unsigned long msync_pmd_range(struct vm_area_struct *vma,
					    pud_t *pud, unsigned long addr,
					    unsigned long end)
{
	pmd_t *pmdp = pmd_offset(pud, addr);
	unsigned long pages = 0;

	do {
		unsigned long next = pmd_addr_end(addr, end);

		if (!pmd_none_or_clear_bad(pmdp))
			pages += msync_pte_range(vma, pmdp, addr, next);
		pmdp++;
		addr = next;
	} while (addr != end);

	return pages;
}
/*
 * Search every populated pmd in [addr, end) for ptes referencing the
 * given swap entry, replacing them with 'page'.  Returns 1 as soon as
 * unuse_pte_range() reports success, otherwise 0.
 */
static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  swp_entry_t entry, struct page *page)
{
	pmd_t *pmdp = pmd_offset(pud, addr);

	do {
		unsigned long next = pmd_addr_end(addr, end);

		if (!pmd_none_or_clear_bad(pmdp) &&
		    unuse_pte_range(vma, pmdp, addr, next, entry, page))
			return 1;
		pmdp++;
		addr = next;
	} while (addr != end);

	return 0;
}
/*
 * Walk the pmd entries covering [addr, end), calling ->pmd_entry on
 * each populated pmd and, when a ->pte_entry callback is registered,
 * splitting huge pmds so the pte level can be walked too.  Holes are
 * reported through ->pte_hole.  Returns 0 or the first non-zero
 * callback result.
 */
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;
		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if (!walk->pte_entry)
			continue;
		split_huge_page_pmd(walk->mm, pmd);
		/*
		 * If the split left the pmd empty, re-evaluate this
		 * entry from the top (it may now be a hole).
		 *
		 * NOTE(review): pmd_none_or_clear_bad() on a pmd that is
		 * concurrently being split by THP may report a transient
		 * state; later kernels use pmd_trans_unstable() here —
		 * confirm this tree is not exposed to that race.
		 */
		if (pmd_none_or_clear_bad(pmd))
			goto again;
		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}
/*
 * Look up the pte mapping 'vaddr' in the current task's address space
 * and return its raw value, or 0 if no mapping exists at any level.
 */
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!mm)
		/* Kernel thread: borrow whatever mm is currently active. */
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	/*
	 * NOTE(review): pmd_offset() is applied directly to the pgd entry,
	 * which assumes a folded (two-level) page-table layout on this
	 * architecture — confirm for the target arch.
	 */
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	/*
	 * NOTE(review): no pte_unmap() before returning — a mapping leak
	 * if pte_offset_map() takes a kmap in this configuration; verify.
	 */
	return pte_val(*pte);
}
/*
 * Free a first-level page table (FCSE-aware variant).  Lower-level
 * tables attached to the entry for (FCSE-translated) address 0 are
 * torn down first, the pgd is removed from the global pgd list, and
 * the order-2 pgd allocation is released.
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	unsigned long flags;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	/*
	 * FCSE remaps the VA before indexing the pgd.
	 *
	 * NOTE(review): the pud is taken from the entry at
	 * pgd_index(fcse_va_to_mva(mm, 0)), but the pgd_clear() below
	 * operates on the entry at pgd_index(0) — confirm these are the
	 * same slot under this FCSE configuration.
	 */
	pud = pud_offset(pgd + pgd_index(fcse_va_to_mva(mm, 0)), 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	/* Detach and free the pte page, then unwind pmd and pud. */
	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
no_pud:
	pgd_clear(pgd);
	pud_free(mm, pud);
no_pgd:
	/* Unlink from the global pgd list before freeing the pages. */
	pgd_list_lock(flags);
	pgd_list_del(pgd);
	pgd_list_unlock(flags);
	free_pages((unsigned long) pgd_base, 2);
}
/*
 * Fill 'vec' with residency information for [addr, end), one byte per
 * page.  Transparent huge pmds are handled wholesale when possible;
 * otherwise the walk drops to the pte level (or reports the span as
 * unmapped for empty/bad pmds).
 */
static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
			      unsigned long addr, unsigned long end,
			      unsigned char *vec)
{
	pmd_t *pmdp = pmd_offset(pud, addr);
	unsigned long next;

	do {
		unsigned long span;

		next = pmd_addr_end(addr, end);
		span = (next - addr) >> PAGE_SHIFT;

		/*
		 * A huge pmd that mincore_huge_pmd() fully handles covers
		 * the whole span; otherwise fall through to the normal path.
		 */
		if (pmd_trans_huge(*pmdp) &&
		    mincore_huge_pmd(vma, pmdp, addr, next, vec)) {
			vec += span;
			continue;
		}

		if (pmd_none_or_clear_bad(pmdp))
			mincore_unmapped_range(vma, addr, next, vec);
		else
			mincore_pte_range(vma, pmdp, addr, next, vec);
		vec += span;
	} while (pmdp++, addr = next, addr != end);
}
/*
 * SDA-style "show address" command: take a hex address from the P1 CLI
 * parameter (or resolve a symbol name via sda_find_addr()), translate
 * it through the current process' page tables, and print each
 * translation level plus the resulting physical address.
 *
 * Returns a VMS-style status code (low bit set on success).
 */
int show$address(int mask)
{
#ifdef __i386__
	int retlen;
	$DESCRIPTOR(p, "p1");
	char c[80];
	struct dsc$descriptor o;
	o.dsc$a_pointer = c;
	o.dsc$w_length = 80;
	memset(c, 0, 80);
	int sts = cli$present(&p);
	if (sts & 1) {
		sts = cli$get_value(&p, &o, &retlen);
		o.dsc$w_length = retlen;
	}
	long addr = strtol(c, 0, 16);
	/* Not a hex number: treat the input as a symbol name instead. */
	if (addr == 0)
		sts = sda_find_addr(c, &addr);
	if ((sts & 1) == 0)
		return sts;
	struct _pcb *pcb;
	struct mm_struct mm;
	long page = addr & PAGE_MASK;
	pgd_t *pgd = 0, *pgd_k;
	pud_t *pud = 0, *pud_k;
	pmd_t *pmd = 0, *pmd_k;
	pte_t *pte = 0, *pte_k;
	long phys = 0;
	/* Read the process' mm out of the (possibly dumped) system image. */
	sda$getmemlong(ctl$gl_pcb, &pcb);
	sda$getmem(pcb->mm, &mm, sizeof(mm));
	pgd_k = pgd_offset(&mm, page);
	sda$getmemlong(pgd_k, &pgd);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud_k = pud_offset(pgd, page);
	sda$getmemlong(pud_k, &pud);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd_k = pmd_offset(pud, page);
	sda$getmemlong(pmd_k, &pmd);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte_k = pte_offset(pmd, page);
	sda$getmemlong(pte_k, &pte);
	if (pte_none/*_or_clear_bad*/(*pte))
		goto out;
	phys = *(long *)pte & PAGE_MASK;
out:
	/*
	 * BUGFIX: the first printf used "%lx" with no matching argument
	 * (undefined behavior) — pass the queried address.
	 */
	printf("\t%lx is an xx address\n\n", addr);
	printf("\tMapped by Level-4 PTE at: %lx : %lx\n", pte_k, *pte);
	printf("\tMapped by Level-3 PTE at: %lx : %lx\n", pmd_k, *pmd);
	printf("\tMapped by Level-2 PTE at: %lx : %lx\n", pud_k, *pud);
	printf("\tMapped by Level-1 PTE at: %lx : %lx\n", pgd_k, *pgd);
	printf("\tMapped by Level-0 PGD at: %lx\n", mm.pgd);
	// printf("\tAlso mapped in SPT window at: %lx\n\n", 0);
	printf("\tMapped to physical address %lx\n\n", phys);
	/* BUGFIX: function previously fell off the end (UB if the
	 * return value is used) — propagate the status code. */
	return sts;
#else
	(void) mask;
	return 0;
#endif
}