/* Ensure all existing pages follow the policy. */
static int verify_pages(struct mm_struct *mm, unsigned long addr,
			unsigned long end, unsigned long *nodes)
{
	while (addr < end) {
		struct page *p;
		pte_t *pte;
		pmd_t *pmd;
		pud_t *pud;
		pgd_t *pgd;

		pgd = pgd_offset(mm, addr);
		if (pgd_none(*pgd)) {
			unsigned long next = (addr + PGDIR_SIZE) & PGDIR_MASK;

			if (next > addr)
				break;
			addr = next;
			continue;
		}
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr = (addr + PUD_SIZE) & PUD_MASK;
			continue;
		}
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr = (addr + PMD_SIZE) & PMD_MASK;
			continue;
		}
		p = NULL;
		pte = pte_offset_map(pmd, addr);
		if (pte_present(*pte))
			p = pte_page(*pte);
		pte_unmap(pte);
		if (p) {
			unsigned nid = page_to_nid(p);

			if (!test_bit(nid, nodes))
				return -EIO;
		}
		addr += PAGE_SIZE;
	}
	return 0;
}
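/*
 * Hypothetical caller, for illustration only (not from the original source):
 * build a single-node bitmap and check that every mapped page in a VMA sits
 * on node 0 using verify_pages() above. The helper name and the stack bitmap
 * are assumptions.
 */
static int example_vma_on_node0(struct vm_area_struct *vma)
{
	DECLARE_BITMAP(nodes, MAX_NUMNODES);

	bitmap_zero(nodes, MAX_NUMNODES);
	__set_bit(0, nodes);
	return verify_pages(vma->vm_mm, vma->vm_start, vma->vm_end, nodes);
}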
/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address, int is_kernel_mode,
				int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		wait_for_migration(pte);
		return 1;
	}

	if (!is_kernel_mode || !pte_present(pteval))
		return 0;
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a guard 4K page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return (pte_t *) 0x3a;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return (pte_t *) 0x3b;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return (pte_t *) 0x10;

	return pte_offset_map(pmd, addr);
}
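/*
 * Illustrative sketch only (not the original s390 uaccess caller): the small
 * constants returned above look like translation-exception codes rather than
 * real pte pointers, so a caller presumably screens them out before
 * dereferencing. The helper name and the below-PAGE_SIZE test are assumptions.
 */
static inline int example_follow_ok(struct mm_struct *mm, unsigned long addr,
				    pte_t **ptep)
{
	pte_t *pte = follow_table(mm, addr);

	if ((unsigned long) pte < PAGE_SIZE)
		return 0;		/* exception code, not a pointer */
	*ptep = pte;
	return pte_present(*pte);
}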
void mprotect_kernel_vm(int w)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr;

	mm = &init_mm;
	for (addr = start_vm; addr < end_vm; ) {
		pgd = pgd_offset(mm, addr);
		pmd = pmd_offset(pgd, addr);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr);
			if (pte_present(*pte))
				protect_vm_page(addr, w, 0);
			addr += PAGE_SIZE;
		} else {
			addr += PMD_SIZE;
		}
	}
}
static inline unsigned long uvirt_to_kva(pgd_t *pgd, unsigned long adr)
{
	unsigned long ret = 0UL;
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (!pgd_none(*pgd)) {
		pmd = pmd_offset(pgd, adr);
		if (!pmd_none(*pmd)) {
			ptep = pte_offset_kernel(pmd, adr);
			pte = *ptep;
			if (pte_present(pte)) {
				ret = (unsigned long) page_address(pte_page(pte));
				ret |= (adr & (PAGE_SIZE - 1));
			}
		}
	}
	/* printk(KERN_INFO "uv2kva(%lx-->%lx)\n", adr, ret); */
	return ret;
}
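/*
 * Hypothetical usage sketch (not from the original driver): translate one
 * page of a user buffer in the current process. The wrapper name is an
 * assumption; the pgd must come from the mm that owns the user address.
 */
static inline unsigned long example_user_to_kva(unsigned long uaddr)
{
	pgd_t *pgd = pgd_offset(current->mm, uaddr);

	return uvirt_to_kva(pgd, uaddr);	/* 0 if not present */
}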
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}

	return pte;
}
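/*
 * Illustrative helper, an assumption rather than original source: report
 * whether a present huge page is already mapped at addr in this mm.
 */
static inline int example_huge_page_mapped(struct mm_struct *mm,
					   unsigned long addr)
{
	pte_t *pte = huge_pte_offset(mm, addr);

	return pte && pte_present(*pte);
}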
static inline void free_one_pgd(pgd_t *dir)
{
	int j;
	pmd_t *pmd;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, 0);
	pgd_clear(dir);
	for (j = 0; j < PTRS_PER_PMD; j++) {
		prefetchw(pmd + j + (PREFETCH_STRIDE / 16));
		free_one_pmd(pmd + j);
	}
	pmd_free(pmd);
}
/* walk_page_table, modified */
static pte_t *walk_page_table(unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(*pgdp))
		return NULL;

	pudp = pud_offset(pgdp, addr);
	if (pud_none(*pudp) || pud_large(*pudp))
		return NULL;

	pmdp = pmd_offset(pudp, addr);
	if (pmd_none(*pmdp) || pmd_large(*pmdp))
		return NULL;

	ptep = pte_offset_kernel(pmdp, addr);
	if (pte_none(*ptep))
		return NULL;

	return ptep;
}
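/*
 * Hypothetical caller, for illustration (the name and use are assumptions):
 * translate a 4K-mapped kernel virtual address to a physical address using
 * the walk above. Returns 0 when no present leaf PTE is found.
 */
static unsigned long example_kva_to_pa(unsigned long addr)
{
	pte_t *ptep = walk_page_table(addr);

	if (!ptep || !pte_present(*ptep))
		return 0;
	return (pte_pfn(*ptep) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
}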
/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void alloc_init_page(unsigned long virt, unsigned long phys,
				   int domain, int prot)
{
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);

	if (pmd_none(*pmdp)) {
		pte_t *ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
						      sizeof(pte_t));

		ptep += PTRS_PER_PTE;

		set_pmd(pmdp, __mk_pmd(ptep, PMD_TYPE_TABLE | PMD_DOMAIN(domain)));
	}
	ptep = pte_offset(pmdp, virt);

	set_pte(ptep, mk_pte_phys(phys, __pgprot(prot)));
}
/*
 * Insert the gateway page into a set of page tables, creating the
 * page tables if necessary.
 */
static void insert_gateway_page(pgd_t *pgd, unsigned long address)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	BUG_ON(!pgd_present(*pgd));

	pud = pud_offset(pgd, address);
	BUG_ON(!pud_present(*pud));

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd)) {
		pte = alloc_bootmem_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
	}

	pte = pte_offset_kernel(pmd, address);
	set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT,
			     PAGE_READONLY));
}
static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(vaddr);
	pmdp = pmd_offset(pgdp, vaddr);

	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return;

	ptep = pte_offset(pmdp, vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return;

	clear_page((void *)ptep);
	pte_clear(ptep);
}
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va);
	if (pg != 0) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
		       flags);
		set_pte_at(&init_mm, va, pg,
			   pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
	}
	return err;
}
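/*
 * Illustrative helper only (the name and loop are assumptions, not part of
 * the original file): map a physically contiguous region one 4K page at a
 * time with map_page() above, bailing out on the first failure.
 */
static int example_map_region(unsigned long va, phys_addr_t pa,
			      unsigned long size, int flags)
{
	unsigned long off;
	int err;

	for (off = 0; off < size; off += PAGE_SIZE) {
		err = map_page(va + off, pa + off, flags);
		if (err)
			return err;
	}
	return 0;
}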
/*
 * cpd_cache_flush_page: Ensures coherency of cache entries owned by
 * 'vma_p's mm for 'va_page'.
 * Assumptions:
 *   - Kernel memory is coherent with the caches.
 *   - User cache entries covered by 'va_page' are incoherent iff covered by
 *     a CPD entry owned by the mm associated with 'vma_p'.
 * Action:
 *   - If the CPD entry covering 'va_page' is owned by 'vma_p's mm, invalidate
 *     the cache entries.
 * Notes:
 *   - The page is specified by a VA, while the flushing call and CPD access
 *     use an MVA.
 */
void cpd_cache_flush_page(struct vm_area_struct *vma_p, unsigned long va_page)
{
	pmd_t cpd;
	int domain;
	unsigned long mva_page = va_to_mva(va_page, vma_p->vm_mm);

	/* Does 'vma_p's mm have any incoherencies? */
	if (!cpd_is_mm_cache_coherent(vma_p->vm_mm)) {
		cpd = *pmd_offset(pgd_offset_k(mva_page), mva_page);
		domain = pmd_domain(cpd);

		/* Is the CPD entry's domain incoherent and active in 'vma_p's mm? */
		if (!cpd_is_domain_cache_coherent(domain) &&
		    domain_active(vma_p->vm_mm->context.dacr, domain)) {
			cpu_cache_clean_invalidate_range(mva_page,
							 mva_page + PAGE_SIZE,
							 vma_p->vm_flags & VM_EXEC);
		}
	}
}
pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_addr_invalid(addr))
		return NULL;

	pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_huge_page(*pmd))
		return (pte_t *)pmd;
	if (!pmd_present(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses of available kernel virtual memory.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, };

	/* allocate some pages for kernel housekeeping tasks */
	empty_bad_page_table = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page       = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
	empty_zero_page      = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);

	memset((void *) empty_zero_page, 0, PAGE_SIZE);

#ifdef CONFIG_HIGHMEM
	if (num_physpages - num_mappedpages) {
		pgd_t *pge;
		pud_t *pue;
		pmd_t *pme;

		pkmap_page_table = alloc_bootmem_pages(PAGE_SIZE);

		pge = swapper_pg_dir + pgd_index_k(PKMAP_BASE);
		pue = pud_offset(pge, PKMAP_BASE);
		pme = pmd_offset(pue, PKMAP_BASE);
		__set_pmd(pme, virt_to_phys(pkmap_page_table) | _PAGE_TABLE);
	}
#endif

	/* distribute the allocatable pages across the various zones and pass
	 * them to the allocator
	 */
	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = num_physpages - num_mappedpages;
#endif

	free_area_init(zones_size);

#ifdef CONFIG_MMU
	/* initialise init's MMU context */
	init_new_context(&init_task, &init_mm);
#endif
} /* end paging_init() */
/*
 * __iounmap unmaps nearly everything, so be careful.
 * It no longer frees the currently pointed-to page tables, but that
 * wasn't used anyway and might be added back later.
 */
void __iounmap(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr / PTRTREESIZE) & 15;

			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = 0;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
/*
 * This routine gets a long from any process space by following the page
 * tables.  NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 */
static unsigned long get_long(struct task_struct *tsk,
			      struct vm_area_struct *vma, unsigned long addr)
{
	pgd_t *pgdir;
	pmd_t *pgmiddle;
	pte_t *pgtable;
	unsigned long page;

repeat:
	pgdir = pgd_offset(vma->vm_mm, addr);
	if (pgd_none(*pgdir)) {
		do_no_page(tsk, vma, addr, 0);
		goto repeat;
	}
	if (pgd_bad(*pgdir)) {
		printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
		pgd_clear(pgdir);
		return 0;
	}
	pgmiddle = pmd_offset(pgdir, addr);
	if (pmd_none(*pgmiddle)) {
		do_no_page(tsk, vma, addr, 0);
		goto repeat;
	}
	if (pmd_bad(*pgmiddle)) {
		printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle));
		pmd_clear(pgmiddle);
		return 0;
	}
	pgtable = pte_offset(pgmiddle, addr);
	if (!pte_present(*pgtable)) {
		do_no_page(tsk, vma, addr, 0);
		goto repeat;
	}
	page = pte_page(*pgtable);
	/* this is a hack for non-kernel-mapped video buffers and similar */
	if (page >= high_memory)
		return 0;
	page += addr & ~PAGE_MASK;
	return *(unsigned long *) page;
}
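/*
 * Hypothetical usage sketch (not the original ptrace caller): peek one word
 * from a traced child. Looking up the VMA and checking that the word does
 * not straddle a page boundary are the caller's responsibility, as the
 * comment above warns.
 */
static unsigned long example_peek_word(struct task_struct *child,
				       unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(child->mm, addr);

	if (!vma || addr < vma->vm_start)
		return 0;
	return get_long(child, vma, addr);
}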
inline int make_page_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto no_page;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto no_page;

	BUG_ON(pmd_trans_huge(*pmd));

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_present(*pte)) {
		spin_unlock(ptl);
		goto no_page;
	}

	ptep_set_wrprotect(mm, addr, pte);
	spin_unlock(ptl);

#if !defined(CONFIG_GRAPHENE_BULK_IPC) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
	my_flush_tlb_page(vma, addr);
#else
	flush_tlb_page(vma, addr);
#endif
	DEBUG("make page COW at %lx\n", addr);
	return 0;

no_page:
	return -EFAULT;
}
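/*
 * Hypothetical usage sketch (name and loop are assumptions): write-protect
 * every page of a VMA so that later writes fault and can be handled as
 * copy-on-write by the caller.
 */
static void example_cow_vma(struct vm_area_struct *vma)
{
	unsigned long addr;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
		make_page_cow(vma->vm_mm, vma, addr);	/* -EFAULT just means a hole */
}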
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		/*
		 * If we find a splitting transparent hugepage we
		 * return zero. That will result in taking the slow
		 * path which will call wait_split_huge_page()
		 * if the pmd is still in splitting state
		 */
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;
		if (pmd_huge(pmd) || pmd_large(pmd)) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_numa(pmd))
				return 0;
			if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
					 write, pages, nr))
				return 0;
		} else if (is_hugepd(pmdp)) {
			if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
					addr, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long long addr = start, end = start + size;
	u64 next;

	while (addr < end) {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr = pud_addr_end(addr, end);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr = pmd_addr_end(addr, end);
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		clear_pte_entry(kvm, pte, addr);
		next = addr + PAGE_SIZE;

		/* If we emptied the pte, walk back up the ladder */
		if (page_empty(pte)) {
			clear_pmd_entry(kvm, pmd, addr);
			next = pmd_addr_end(addr, end);
			if (page_empty(pmd) && !page_empty(pud)) {
				clear_pud_entry(kvm, pud, addr);
				next = pud_addr_end(addr, end);
			}
		}

		addr = next;
	}
}
static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
				     unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
			continue;
		}

		if (pmd_none(*pmd)) {
			pmd_populate_kernel(&init_mm, pmd,
					    early_alloc(PAGE_SIZE, NUMA_NO_NODE));
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}
int map_page(unsigned long va, unsigned long pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc(&init_mm, pd, va);
	if (pg != 0) {
		err = 0;
		set_pte(pg, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
		if (mem_init_done)
			flush_HPTE(0, va, pg);
	}
	spin_unlock(&init_mm.page_table_lock);
	return err;
}
static pte_t *pgtbl_lookup_address(paddr_t pgtbl, unsigned long addr)
{
	pgd_t *pgd = ((pgd_t *)chal_pa2va((void *)pgtbl)) + pgd_index(addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	return pte_offset_kernel(pmd, addr);
}
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	return pte_val(*pte);
}
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg != 0) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) &&
		       pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	return err;
}
static void free_pud_range(pgd_t *pgd)
{
	int i;
	pud_t *pud;

	pud = pud_offset(pgd, 0);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		pmd_t *pmd;
		struct page *page;

		if (oleole_pud_none_or_clear_bad(pud))
			continue;

		free_pmd_range(pud);

		pmd = pmd_offset(pud, 0);
		page = virt_to_page(pmd);
		__free_page(page);
		pud_clear(pud);
	}
}
static inline pte_t *tpe_lookup_address(unsigned long address,
					unsigned int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL;

	return pte;
}
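/*
 * Illustrative sketch only (helper name is an assumption): use the lookup
 * above to test whether the mapping backing a kernel address is writable.
 * Note that for a large page the returned pointer actually aliases the pmd.
 */
static inline int example_addr_is_writable(unsigned long address)
{
	unsigned int level;
	pte_t *pte = tpe_lookup_address(address, &level);

	return pte && pte_write(*pte);
}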
/*
 * dump_cpd: Dumps the entire CPD for debugging.
 *
 * Notes:
 *   - user = 0: all entries dumped; user = 1: only user entries dumped;
 *     user = -1: dump domain 0 entries only.
 */
void dump_cpd(int user)
{
	int i;
	int domain;
	pmd_t *cpd_p = pmd_offset(pgd_offset_k(0), 0);	/* Get CPD address */

	for (i = 0; i < PTRS_PER_PGD; i++) {
		domain = pmd_domain(cpd_p[i]);

		if ((user == -1) && (domain == 0) && pmd_val(cpd_p[i])) {
			printk("** dump_cpd() cpd[%d] 0x%x domain 0 **\n",
			       i, (unsigned int)pmd_val(cpd_p[i]));
			continue;
		}

		if (!user || (domain >= DOMAIN_START && domain <= DOMAIN_END)) {
			printk("** dump_cpd() cpd[%d] 0x%x domain tag %d **\n",
			       i, (unsigned int)pmd_val(cpd_p[i]), domain);
		}
	}
	printk("\n");
}