int m4u_v2p_new(unsigned int va) { unsigned int pmdOffset = (va & (PMD_SIZE - 1)); unsigned int pageOffset = (va & (PAGE_SIZE - 1)); pgd_t *pgd; pmd_t *pmd; pte_t *pte; unsigned int pa; printk("Enter m4u_user_v2p()! 0x%x\n", va); pgd = pgd_offset(current->mm, va); /* what is tsk->mm */ printk("m4u_user_v2p(), pgd 0x%x\n", pgd); printk("pgd_none=%d, pgd_bad=%d\n", pgd_none(*pgd), pgd_bad(*pgd)); if(pgd_none(*pgd)||pgd_bad(*pgd)) { printk("Error: m4u_user_v2p(), virtual addr 0x%x, pgd invalid! \n", va); return 0; } pmd = pmd_offset(pgd, va); printk("m4u_user_v2p(), pmd 0x%x\n", pmd); printk("pmd_none=%d, pmd_bad=%d, pmd_val=0x%x\n", pmd_none(*pmd), pmd_bad(*pmd), pmd_val(*pmd)); /* If this is a page table entry, keep on walking to the next level */ if (( (unsigned int)pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) { if(pmd_none(*pmd)||pmd_bad(*pmd)) { printk("Error: m4u_user_v2p(), virtual addr 0x%x, pmd invalid! \n", va); return 0; } pte = pte_offset_map(pmd, va); printk("m4u_user_v2p(), pte 0x%x\n", pte); if(pte_present(*pte)) { pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset; printk("PA = 0x%8x\n", pa); return pa; } } else /* Only 1 level page table */ { if(pmd_none(*pmd)) { printk("Error: m4u_user_v2p(), virtual addr 0x%x, pmd invalid! \n", va); return 0; } pa=(pte_val(*pmd) & (PMD_MASK)) | pmdOffset; printk("PA = 0x%8x\n", pa); return pa; } return 0; }
/*
 * Copy one pgd's worth of pmd entries from src to dst, clamped to
 * PGDIR_SIZE.  Delegates the per-pte copying (and COW protection) to
 * copy_pte_range().  Returns 0 on success or -ENOMEM.
 */
static inline int copy_pmd_range(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long address, unsigned long size, int cow)
{
    pmd_t * src_pmd, * dst_pmd;
    unsigned long end;
    int error = 0;

    if (pgd_none(*src_pgd))
        return 0;               /* nothing mapped here: nothing to copy */
    if (pgd_bad(*src_pgd)) {
        printk("copy_pmd_range: bad pgd (%08lx)\n", pgd_val(*src_pgd));
        pgd_clear(src_pgd);     /* scrub the corrupt entry rather than walk it */
        return 0;
    }
    src_pmd = pmd_offset(src_pgd, address);
    if (pgd_none(*dst_pgd)) {
        /* destination pgd has no pmd table yet -- allocate one */
        if (!pmd_alloc(dst_pgd, 0))
            return -ENOMEM;
    }
    dst_pmd = pmd_offset(dst_pgd, address);

    address &= ~PGDIR_MASK;     /* offset within this pgd entry */
    end = address + size;
    if (end > PGDIR_SIZE)
        end = PGDIR_SIZE;       /* clamp to this pgd's reach */

    do {
        error = copy_pte_range(dst_pmd++, src_pmd++, address, end - address, cow);
        if (error)
            break;
        address = (address + PMD_SIZE) & PMD_MASK;  /* next pmd boundary */
    } while (address < end);
    return error;
}
/* * This function zeroes out partial mmap'ed pages at truncation time.. */ static void partial_clear(struct vm_area_struct *vma, unsigned long address) { pgd_t *page_dir; pmd_t *page_middle; pte_t *page_table, pte; page_dir = pgd_offset(vma->vm_mm, address); if (pgd_none(*page_dir)) return; if (pgd_bad(*page_dir)) { printk("bad page table directory entry %p:[%lx]\n", page_dir, pgd_val(*page_dir)); pgd_clear(page_dir); return; } page_middle = pmd_offset(page_dir, address); if (pmd_none(*page_middle)) return; if (pmd_bad(*page_middle)) { printk("bad page table directory entry %p:[%lx]\n", page_dir, pgd_val(*page_dir)); pmd_clear(page_middle); return; } page_table = pte_offset(page_middle, address); pte = *page_table; if (!pte_present(pte)) return; flush_cache_page(vma, address); address &= ~PAGE_MASK; address += pte_page(pte); if (address >= high_memory) return; memset((void *) address, 0, PAGE_SIZE - (address & ~PAGE_MASK)); flush_page_to_ram(pte_page(pte)); }
/*
 * Tear down the kernel mapping of one ioremapped page at vaddr.
 * Walks the kernel (init_mm) tables pgd -> pud -> pmd -> pte and
 * clears the pte.  Silently returns if any level is absent.
 */
static void shmedia_unmapioaddr(unsigned long vaddr)
{
    pgd_t *pgdp;
    pud_t *pudp;
    pmd_t *pmdp;
    pte_t *ptep;

    pgdp = pgd_offset_k(vaddr);         /* kernel page tables */
    if (pgd_none(*pgdp) || pgd_bad(*pgdp))
        return;

    pudp = pud_offset(pgdp, vaddr);
    if (pud_none(*pudp) || pud_bad(*pudp))
        return;

    pmdp = pmd_offset(pudp, vaddr);
    if (pmd_none(*pmdp) || pmd_bad(*pmdp))
        return;

    ptep = pte_offset_kernel(pmdp, vaddr);
    if (pte_none(*ptep) || !pte_present(*ptep))
        return;

    /*
     * NOTE(review): clear_page() zeroes the entire page that *contains*
     * this pte, not just the single entry -- wiping every pte that
     * shares the table page.  Looks suspect; confirm the arch intent
     * before relying on this.
     */
    clear_page((void *)ptep);
    pte_clear(&init_mm, vaddr, ptep);
}
/*
 * Remove the mappings covered by one pgd entry, handing each pmd slot
 * to remove_mapping_pte_range().  The range is clamped to this pgd's
 * PGDIR_SIZE reach; a corrupt pgd entry is reported and scrubbed.
 */
static inline void remove_mapping_pmd_range (pgd_t *pgd, unsigned long address, unsigned long size)
{
    pmd_t *slot;
    unsigned long off, limit;

    if (pgd_none (*pgd))
        return;
    if (pgd_bad (*pgd)) {
        printk ("remove_graphics_pmd_range: bad pgd (%08lx)\n", pgd_val (*pgd));
        pgd_clear (pgd);
        return;
    }

    slot = pmd_offset (pgd, address);
    off = address & ~PGDIR_MASK;    /* offset inside this pgd span */
    limit = off + size;
    if (limit > PGDIR_SIZE)
        limit = PGDIR_SIZE;

    do {
        remove_mapping_pte_range (slot, off, limit - off);
        off = (off + PMD_SIZE) & PMD_MASK;
        slot++;
    } while (off < limit);
}
/*
 * Walk one pgd slot of vma and let unswap_pmd() replace any pte that
 * holds swap entry "entry" with "page".  Range is clamped to the pgd's
 * reach; a corrupt pgd entry is reported and cleared.
 */
static inline void unswap_pgd(struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long size, unsigned long entry, unsigned long page /* , int isswap */)
{
    pmd_t *walk;
    unsigned long base, limit, span_off;

    if (pgd_none(*dir))
        return;
    if (pgd_bad(*dir)) {
        printk("unswap_pgd: bad pgd (%08lx)\n", pgd_val(*dir));
        pgd_clear(dir);
        return;
    }

    walk = pmd_offset(dir, address);
    span_off = address & PGDIR_MASK;    /* base of this pgd span */
    base = address & ~PGDIR_MASK;
    limit = base + size;
    if (limit > PGDIR_SIZE)
        limit = PGDIR_SIZE;

    do {
        unswap_pmd(vma, walk, base, limit - base, span_off, entry, page /* , isswap */);
        base = (base + PMD_SIZE) & PMD_MASK;
        walk++;
    } while (base < limit);
}
/*
 * Search one pgd slot of vma for swap references of "type", delegating
 * to unuse_pmd() per pmd.  Returns 1 as soon as a match is handled,
 * 0 when the whole span was scanned without one.
 */
static inline int unuse_pgd(struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long size, unsigned int type, unsigned long page)
{
    pmd_t *cur;
    unsigned long span_base, pos, stop;

    if (pgd_none(*dir))
        return 0;
    if (pgd_bad(*dir)) {
        printk("unuse_pgd: bad pgd (%08lx)\n", pgd_val(*dir));
        pgd_clear(dir);
        return 0;
    }

    cur = pmd_offset(dir, address);
    span_base = address & PGDIR_MASK;
    pos = address & ~PGDIR_MASK;
    stop = pos + size;
    if (stop > PGDIR_SIZE)
        stop = PGDIR_SIZE;

    do {
        if (unuse_pmd(vma, cur, pos, stop - pos, span_base, type, page))
            return 1;   /* found it -- stop the walk */
        pos = (pos + PMD_SIZE) & PMD_MASK;
        cur++;
    } while (pos < stop);
    return 0;
}
static struct page* my_follow_page(struct vm_area_struct *vma, unsigned long addr) { pud_t *pud = NULL; pmd_t *pmd = NULL; pgd_t *pgd = NULL; pte_t *pte = NULL; spinlock_t *ptl = NULL; struct page* page = NULL; struct mm_struct *mm = current->mm; pgd = pgd_offset(current->mm, addr); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) { goto out; } pud = pud_offset(pgd, addr); if (pud_none(*pud) || unlikely(pud_bad(*pud))) { goto out; } printk("aaaa\n"); pmd = pmd_offset(pud, addr); if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) { goto out; } pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl); printk("bbbb\n"); if (!pte) goto out; printk("cccc\n"); if (!pte_present(*pte)) goto unlock; page = pfn_to_page(pte_pfn(*pte)); if (!page) goto unlock; get_page(page); unlock: pte_unmap_unlock(pte, ptl); out: return page; }
/*
 * Try to swap out pages covered by one pgd entry of vma: clamp the
 * range to the pgd boundary and hand each pmd to swap_out_pmd().
 * Returns the first non-zero result from swap_out_pmd(), or 0 when
 * the span is exhausted or mm's swap quota runs out.
 */
static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int gfp_mask)
{
    pmd_t * pmd;
    unsigned long pgd_end;

    if (pgd_none(*dir))
        return 0;
    if (pgd_bad(*dir)) {
        pgd_ERROR(*dir);
        pgd_clear(dir);     /* scrub the corrupt entry */
        return 0;
    }

    pmd = pmd_offset(dir, address);

    pgd_end = (address + PGDIR_SIZE) & PGDIR_MASK;  /* next pgd boundary (0 on wrap) */
    if (pgd_end && (end > pgd_end))
        end = pgd_end;

    do {
        int result = swap_out_pmd(mm, vma, pmd, address, end, gfp_mask);
        if (result)
            return result;
        if (!mm->swap_cnt)      /* swap quota for this mm exhausted */
            return 0;
        address = (address + PMD_SIZE) & PMD_MASK;
        pmd++;
    } while (address && (address < end));   /* "address" test guards wraparound to 0 */
    return 0;
}
/*
 * This routine gets a long from any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 */
static unsigned long get_long(struct vm_area_struct * vma, unsigned long addr)
{
    pgd_t * pgdir;
    pte_t * pgtable;
    unsigned long page;

repeat:
    pgdir = PAGE_DIR_OFFSET(vma->vm_mm, addr);
    if (pgd_none(*pgdir)) {
        /* nothing mapped: fault the page in, then retry the walk */
        do_no_page(vma, addr, 0);
        goto repeat;
    }
    if (pgd_bad(*pgdir)) {
        printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
        pgd_clear(pgdir);
        return 0;
    }
    /* two-level walk: locate the pte within the pgd's pte page */
    pgtable = (pte_t *) (PAGE_PTR(addr) + pgd_page(*pgdir));
    if (!pte_present(*pgtable)) {
        do_no_page(vma, addr, 0);   /* bring it back (e.g. from swap) and retry */
        goto repeat;
    }
    page = pte_page(*pgtable);
    /* this is a hack for non-kernel-mapped video buffers and similar */
    if (page >= high_memory)
        return 0;
    page += addr & ~PAGE_MASK;      /* byte offset within the page */
    return *(unsigned long *) page;
}
/*
 * Sync the ptes of [address, end) under one pgd entry, pmd by pmd.
 * Returns the OR of the per-pmd error results.
 */
static inline int filemap_sync_pmd_range(pgd_t * pgd, unsigned long address, unsigned long end, struct vm_area_struct *vma, unsigned int flags)
{
    pmd_t *walker;
    int rc = 0;

    if (pgd_none(*pgd))
        return 0;
    if (pgd_bad(*pgd)) {
        pgd_ERROR(*pgd);
        pgd_clear(pgd);
        return 0;
    }

    walker = pmd_offset(pgd, address);
    /* if the range leaves this pgd, only sync up to the pgd boundary */
    if ((address & PGDIR_MASK) != (end & PGDIR_MASK))
        end = (address & PGDIR_MASK) + PGDIR_SIZE;

    do {
        rc |= filemap_sync_pte_range(walker, address, end, vma, flags);
        address = (address + PMD_SIZE) & PMD_MASK;
        walker++;
    } while (address && (address < end));   /* address == 0 means wraparound */
    return rc;
}
/*
 * Dump out the page tables associated with 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd;

    if (!mm)
        mm = &init_mm;   /* no user mm (e.g. kernel thread): dump kernel tables */

    pr_alert("pgd = %p\n", mm->pgd);
    pgd = pgd_offset(mm, addr);
    pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));

    /* do { } while (0): "break" aborts the dump at the first invalid level */
    do {
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
            break;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud) || pud_bad(*pud))
            break;

        pmd = pmd_offset(pud, addr);
        printk(", *pmd=%016llx", pmd_val(*pmd));
        if (pmd_none(*pmd) || pmd_bad(*pmd))
            break;

        pte = pte_offset_map(pmd, addr);
        printk(", *pte=%016llx", pte_val(*pte));
        pte_unmap(pte);   /* balance pte_offset_map() */
    } while(0);

    printk("\n");
}
/*
 * Pin the user page backing _addr for writing: walk current->mm down to
 * the pte and take the pte lock.  Succeeds (returns 1, with *ptep/*ptlp
 * filled in and the pte lock HELD -- caller must pte_unmap_unlock())
 * only when the pte is present, young, writable and dirty, i.e. when a
 * write through the kernel alias cannot fault.  Returns 0 (lock already
 * released) otherwise.
 */
static int pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
    unsigned long addr = (unsigned long)_addr;
    pgd_t *pgd;
    pmd_t *pmd;
    pte_t *pte;
    pud_t *pud;
    spinlock_t *ptl;

    pgd = pgd_offset(current->mm, addr);
    if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
        return 0;

    pud = pud_offset(pgd, addr);
    if (unlikely(pud_none(*pud) || pud_bad(*pud)))
        return 0;

    pmd = pmd_offset(pud, addr);
    if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
        return 0;

    /* map the pte page and take its lock */
    pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
    if (unlikely(!pte_present(*pte) || !pte_young(*pte) || !pte_write(*pte) || !pte_dirty(*pte))) {
        pte_unmap_unlock(pte, ptl);   /* not safely writable: drop the lock */
        return 0;
    }

    *ptep = pte;
    *ptlp = ptl;

    return 1;
}
u32 imm_get_physical(void *v, u32 immid) { pmd_t *pmd; pte_t *pte; pgd_t *pgd; u32 val = 0, virtual = (u32)v; struct mm_struct* mm; if (IMMID_USER(immid)) mm = current->mm; else mm = &init_mm; pgd = pgd_offset(mm, virtual); if (!pgd_none(*pgd) && !pgd_bad(*pgd)) { /* 1st level entry pointer */ pmd = pmd_offset(pgd, virtual); if (!pmd_none(*pmd) && !pmd_bad(*pmd)) { /* 2nd level entry pointer */ pte = pte_offset_kernel(pmd, virtual); if (pte) { val = (*(u32 *)((u32)pte-2048))&PAGE_MASK; val += virtual%PAGE_SIZE; } } else if (!pmd_none(*pmd)) {
/* mmlist_lock and vma->vm_mm->page_table_lock are held */
/*
 * Walk one pgd slot of vma and let unuse_pmd() replace any pte holding
 * swap entry "entry" with "page".
 */
static inline void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long size, swp_entry_t entry, struct page* page)
{
    pmd_t * pmd;
    unsigned long offset, end;

    if (pgd_none(*dir))
        return;
    if (pgd_bad(*dir)) {
        pgd_ERROR(*dir);
        pgd_clear(dir);     /* scrub the corrupt entry */
        return;
    }
    pmd = pmd_offset(dir, address);
    offset = address & PGDIR_MASK;  /* base of this pgd span */
    address &= ~PGDIR_MASK;
    end = address + size;
    if (end > PGDIR_SIZE)
        end = PGDIR_SIZE;           /* clamp to this pgd's reach */
    if (address >= end)
        BUG();                      /* empty or overflowed range from caller */
    do {
        unuse_pmd(vma, pmd, address, end - address, offset, entry, page);
        address = (address + PMD_SIZE) & PMD_MASK;
        pmd++;
    } while (address && (address < end));   /* address == 0 means wraparound */
}
/*
 * Unmap the range covered by one pgd entry, pmd by pmd, via
 * zap_pte_range().  Returns the total number of pages freed.
 */
static inline int zap_pmd_range(struct mm_struct *mm, pgd_t * dir, unsigned long address, unsigned long size)
{
    pmd_t *slot;
    unsigned long stop;
    int nr_freed = 0;

    if (pgd_none(*dir))
        return 0;
    if (pgd_bad(*dir)) {
        pgd_ERROR(*dir);
        pgd_clear(dir);
        return 0;
    }

    slot = pmd_offset(dir, address);
    address &= ~PGDIR_MASK;     /* offset within this pgd span */
    stop = address + size;
    if (stop > PGDIR_SIZE)
        stop = PGDIR_SIZE;

    do {
        nr_freed += zap_pte_range(mm, slot, address, stop - address);
        address = (address + PMD_SIZE) & PMD_MASK;
        slot++;
    } while (address < stop);
    return nr_freed;
}
/*
 * Apply the pte iterator "op" (with argument "arg") over the range
 * covered by one pgd entry, pmd slot by pmd slot.
 */
static inline void iterate_pmd(pgd_t * dir, unsigned long address, unsigned long size, pte_iterator_t op, unsigned long arg)
{
    pmd_t *scan;
    unsigned long limit;

    if (pgd_none(*dir))
        return;
    if (pgd_bad(*dir)) {
        pgd_ERROR(*dir);
        pgd_clear(dir);
        return;
    }

    scan = pmd_offset(dir, address);
    address &= ~PGDIR_MASK;     /* offset inside this pgd span */
    limit = address + size;
    if (limit > PGDIR_SIZE)
        limit = PGDIR_SIZE;

    do {
        iterate_pte(scan, address, limit - address, op, arg);
        address = (address + PMD_SIZE) & PMD_MASK;
        scan++;
    } while (address < limit);
}
/** * Replace the PFN of a PTE with the address of the actual page. * * The caller maps a reserved dummy page at the address with the desired access * and flags. * * This hack is required for older Linux kernels which don't provide * remap_pfn_range(). * * @returns 0 on success, -ENOMEM on failure. * @param mm The memory context. * @param ulAddr The mapping address. * @param Phys The physical address of the page to map. */ static int rtR0MemObjLinuxFixPte(struct mm_struct *mm, unsigned long ulAddr, RTHCPHYS Phys) { int rc = -ENOMEM; pgd_t *pgd; spin_lock(&mm->page_table_lock); pgd = pgd_offset(mm, ulAddr); if (!pgd_none(*pgd) && !pgd_bad(*pgd)) { pmd_t *pmd = pmd_offset(pgd, ulAddr); if (!pmd_none(*pmd)) { pte_t *ptep = pte_offset_map(pmd, ulAddr); if (ptep) { pte_t pte = *ptep; pte.pte_high &= 0xfff00000; pte.pte_high |= ((Phys >> 32) & 0x000fffff); pte.pte_low &= 0x00000fff; pte.pte_low |= (Phys & 0xfffff000); set_pte(ptep, pte); pte_unmap(ptep); rc = 0; } }
/*
 * Do a quick page-table lookup for a single page.
 *
 * Returns the page backing "address" in mm, or 0 when the address is
 * not mapped/present -- or, for write lookups, when the pte is not
 * both writable and dirty.
 */
static struct page * follow_page(struct mm_struct *mm, unsigned long address, int write)
{
    pgd_t *top;
    pmd_t *mid;
    pte_t *slot, entry;

    top = pgd_offset(mm, address);
    if (pgd_none(*top) || pgd_bad(*top))
        return 0;

    mid = pmd_offset(top, address);
    if (pmd_none(*mid) || pmd_bad(*mid))
        return 0;

    slot = pte_offset(mid, address);
    if (!slot)
        return 0;

    entry = *slot;
    if (!pte_present(entry))
        return 0;
    /* for a write lookup, require a writable AND dirty pte */
    if (write && !(pte_write(entry) && pte_dirty(entry)))
        return 0;
    return pte_page(entry);
}
/* mm->page_table_lock is held. mmap_sem is not held */
/*
 * Swap-out pass over one pgd entry of vma: clamp [address, end) to the
 * pgd boundary and feed each pmd to swap_out_pmd().  "count" is the
 * remaining page budget; the function returns whatever is left of it.
 */
static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int count, zone_t * classzone)
{
    pmd_t * pmd;
    unsigned long pgd_end;

    if (pgd_none(*dir))
        return count;
    if (pgd_bad(*dir)) {
        pgd_ERROR(*dir);
        pgd_clear(dir);     /* scrub the corrupt entry */
        return count;
    }

    pmd = pmd_offset(dir, address);

    pgd_end = (address + PGDIR_SIZE) & PGDIR_MASK;  /* next pgd boundary (0 on wrap) */
    if (pgd_end && (end > pgd_end))
        end = pgd_end;

    do {
        count = swap_out_pmd(mm, vma, pmd, address, end, count, classzone);
        if (!count)
            break;              /* page budget used up */
        /* lock depth can be 1 or 2 */
        if (conditional_schedule_needed())
            return count;       /* bail out so the caller can reschedule */
        address = (address + PMD_SIZE) & PMD_MASK;
        pmd++;
    } while (address && (address < end));   /* address == 0 means wraparound */
    return count;
}
/*
 * Translate user pointer "ptr" in task p's address space to a
 * kernel-addressable physical address, or 0 when the task/pointer is
 * invalid or the page is not present.
 */
static unsigned long get_phys_addr(struct task_struct * p, unsigned long ptr)
{
    pgd_t *page_dir;
    pmd_t *page_middle;
    pte_t pte;

    if (!p || !p->mm || ptr >= TASK_SIZE)
        return 0;       /* no task, no mm, or pointer outside user space */
    page_dir = pgd_offset(p->mm,ptr);
    if (pgd_none(*page_dir))
        return 0;
    if (pgd_bad(*page_dir)) {
        printk("bad page directory entry %08lx\n", pgd_val(*page_dir));
        pgd_clear(page_dir);    /* scrub the corrupt entry */
        return 0;
    }
    page_middle = pmd_offset(page_dir,ptr);
    if (pmd_none(*page_middle))
        return 0;
    if (pmd_bad(*page_middle)) {
        printk("bad page middle entry %08lx\n", pmd_val(*page_middle));
        pmd_clear(page_middle);
        return 0;
    }
    pte = *pte_offset(page_middle,ptr);
    if (!pte_present(pte))
        return 0;
    /* page base plus the byte offset within the page */
    return pte_page(pte) + (ptr & ~PAGE_MASK);
}
/*
 * Look up the pte for addr in mm.  Returns a pointer to the pte, or
 * NULL when any level is absent, corrupt (reported and scrubbed), or
 * the pte itself is none.
 */
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *top;
    pmd_t *mid;
    pte_t *result;

    top = pgd_offset(mm, addr);
    if (pgd_none(*top))
        return NULL;
    if (pgd_bad(*top)) {
        printk("move_one_page: bad source pgd (%08lx)\n", pgd_val(*top));
        pgd_clear(top);
        return NULL;
    }

    mid = pmd_offset(top, addr);
    if (pmd_none(*mid))
        return NULL;
    if (pmd_bad(*mid)) {
        printk("move_one_page: bad source pmd (%08lx)\n", pmd_val(*mid));
        pmd_clear(mid);
        return NULL;
    }

    result = pte_offset(mid, addr);
    if (pte_none(*result))
        return NULL;
    return result;
}
/*
 * Look up the pte for addr in mm.  Returns a pointer to the pte, or
 * NULL when any level is absent, corrupt (reported via the *_ERROR
 * macros and scrubbed), or the pte itself is none.
 */
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *top;
    pmd_t *mid;
    pte_t *result;

    top = pgd_offset(mm, addr);
    if (pgd_none(*top))
        return NULL;
    if (pgd_bad(*top)) {
        pgd_ERROR(*top);
        pgd_clear(top);
        return NULL;
    }

    mid = pmd_offset(top, addr);
    if (pmd_none(*mid))
        return NULL;
    if (pmd_bad(*mid)) {
        pmd_ERROR(*mid);
        pmd_clear(mid);
        return NULL;
    }

    result = pte_offset(mid, addr);
    if (pte_none(*result))
        return NULL;
    return result;
}
/* this routine handles present pages, when users try to write to a shared page. */ void do_wp_page(struct vm_area_struct *vma, unsigned long address, int write_access) { pgd_t *pgd; pmd_t *pmd; pte_t *page_table,pte; unsigned long old_page, new_page; new_page = get_free_page(GFP_KERNEL); pgd = pgd_offset(vma->vm_task, address); if(pgd_none(*pgd)) goto end_wp_page; if(pgd_bad(*pgd)) goto bad_wp_page; pmd = pmd_offset(pgd,address); if(pmd_none(*pmd)) goto end_wp_page; if(pmd_bad(*pmd)) goto bad_wp_page; page_table = pte_offset(pmd,address); pte = *page_table; if(!pte_present(pte)) goto end_wp_page; if(pte_write(pte)) goto end_wp_page; old_page = pte_page(pte); if(old_page >= main_memory_end) goto bad_wp_page; (vma->vm_task->mm->min_flt)++; if(mem_map[MAP_NR(old_page)].flags & PAGE_PRESENT) { if(new_page) { if(mem_map[MAP_NR(old_page)].flags & MAP_PAGE_RESERVED) ++(vma->vm_task->mm->rss); copy_page(old_page, new_page); *page_table = pte_mkwrite(pte_mkdirty(mk_pte((unsigned long)&new_page, vma->vm_page_prot))); free_page(old_page); return; } pte_val(*page_table) &= PAGE_BAD; free_page(old_page); oom(); return; } *page_table = pte_mkdirty(pte_mkwrite(pte)); if(new_page) free_page(new_page); return; bad_wp_page: printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page); goto end_wp_page; end_wp_page: if(new_page) free_page(new_page); return; }
/*
 * Report whether a page-directory entry is suspect: present yet bad.
 * Returns 1 for a present-but-bad entry, 0 otherwise.
 */
static int check_one_table (struct pde *page_dir)
{
    if (pgd_none (*page_dir))
        return 0;               /* empty entry is fine */
    return pgd_bad (*page_dir) ? 1 : 0;
}
/*
 * Write "count" bytes from the user buffer "buf" into the address space
 * of the task identified by the /proc inode, starting at file->f_pos.
 * Proceeds page by page through the target's page tables and stops
 * early on a pending signal or an unmapped/read-only page.  Returns the
 * number of bytes written, 0, -EINVAL, -ESRCH, or -ERESTARTSYS.
 */
static int mem_write(struct inode * inode, struct file * file,char * buf, int count)
{
    pgd_t *page_dir;
    pmd_t *page_middle;
    pte_t pte;
    char * page;
    struct task_struct * tsk;
    unsigned long addr;
    char *tmp;
    int i;

    if (count < 0)
        return -EINVAL;
    addr = file->f_pos;
    tsk = get_task(inode->i_ino >> 16);     /* task encoded in high inode bits */
    if (!tsk)
        return -ESRCH;
    tmp = buf;
    while (count > 0) {
        if (current->signal & ~current->blocked)
            break;      /* pending signal: stop and report the partial write */
        page_dir = pgd_offset(tsk,addr);
        if (pgd_none(*page_dir))
            break;
        if (pgd_bad(*page_dir)) {
            printk("Bad page dir entry %08lx\n", pgd_val(*page_dir));
            pgd_clear(page_dir);    /* scrub the corrupt entry */
            break;
        }
        page_middle = pmd_offset(page_dir,addr);
        if (pmd_none(*page_middle))
            break;
        if (pmd_bad(*page_middle)) {
            printk("Bad page middle entry %08lx\n", pmd_val(*page_middle));
            pmd_clear(page_middle);
            break;
        }
        pte = *pte_offset(page_middle,addr);
        if (!pte_present(pte))
            break;
        if (!pte_write(pte))
            break;      /* refuse to write through a read-only mapping */
        page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
        i = PAGE_SIZE-(addr & ~PAGE_MASK);  /* bytes left in this page */
        if (i > count)
            i = count;
        memcpy_fromfs(page, tmp, i);        /* copy in from user space */
        addr += i;
        tmp += i;
        count -= i;
    }
    file->f_pos = addr;
    if (tmp != buf)
        return tmp-buf;     /* partial or full write succeeded */
    if (current->signal & ~current->blocked)
        return -ERESTARTSYS;
    return 0;
}
asmlinkage long sys_my_syscall( int pid, unsigned long address) { struct task_struct* task; struct mm_struct* mm; pgd_t* pgd; pud_t* pud; pmd_t* pmd; pte_t* pte; unsigned long pte_val ; printk(KERN_INFO "PID: %d, VIRTUAL_ADDR: 0x%lx\n", pid, address); for_each_process(task) { if(task->pid == pid) { printk(KERN_INFO "Task %d found\n", task->pid); mm = task->mm; pgd = pgd_offset(mm, address); printk(KERN_INFO "PGD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pgd_present(*pgd), pgd_bad(*pgd), pgd_none(*pgd)); if(!(pgd_none(*pgd) || pgd_bad(*pgd)) && pgd_present(*pgd)) { printk(KERN_INFO "PGD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pgd_present(*pgd), pgd_bad(*pgd), pgd_none(*pgd)); pud = pud_offset(pgd, address); printk(KERN_INFO "PUD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pud_present(*pud), pud_bad(*pud), pud_none(*pud)); if(!(pud_none(*pud) || pud_bad(*pud)) && pud_present(*pud)) { printk(KERN_INFO "PUD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pud_present(*pud), pud_bad(*pud), pud_none(*pud)); pmd = pmd_offset(pud, address); printk(KERN_INFO "PMD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pmd_present(*pmd), pmd_bad(*pmd), pmd_none(*pmd)); if(!(pmd_none(*pmd) || pmd_bad(*pmd)) && pmd_present(*pmd)) { printk(KERN_INFO "PMD INFO: PRESENT: %d, BAD: %d, NONE: %d\n", pmd_present(*pmd), pmd_bad(*pmd), pmd_none(*pmd)); pte = pte_offset_map(pmd, address); printk(KERN_INFO "PTE INFO: PRESENT: %d PTE: 0x%lx \n ", pte_present(*pte), pte->pte); pte_val = pte->pte; if(pte_val == 0) pte_val = __pte_to_swp_entry(*pte).val; pte_unmap(pte); printk(KERN_INFO "pte_val: %lx\n" , pte_val); return pte_val; } } } } } printk(KERN_INFO "Data not found!\n"); return 0; }
/*
 * This routine gets a long from any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 */
static unsigned long get_long(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr)
{
    pgd_t * pgdir;
    pmd_t * pgmiddle;
    pte_t * pgtable;
    unsigned long page;
    int fault;

repeat:
    pgdir = pgd_offset(vma->vm_mm, addr);
    if (pgd_none(*pgdir)) {
        /* nothing mapped: fault the page in, then retry the whole walk */
        fault = handle_mm_fault(tsk, vma, addr, 0);
        if (fault > 0)
            goto repeat;
        if (fault < 0)
            force_sig(SIGKILL, tsk);    /* unrecoverable fault (e.g. OOM) */
        return 0;
    }
    if (pgd_bad(*pgdir)) {
        printk("ptrace[1]: bad page directory %lx\n", pgd_val(*pgdir));
        pgd_clear(pgdir);               /* scrub the corrupt entry */
        return 0;
    }
    pgmiddle = pmd_offset(pgdir, addr);
    if (pmd_none(*pgmiddle)) {
        fault = handle_mm_fault(tsk, vma, addr, 0);
        if (fault > 0)
            goto repeat;
        if (fault < 0)
            force_sig(SIGKILL, tsk);
        return 0;
    }
    if (pmd_bad(*pgmiddle)) {
        printk("ptrace[3]: bad pmd %lx\n", pmd_val(*pgmiddle));
        pmd_clear(pgmiddle);
        return 0;
    }
    pgtable = pte_offset(pgmiddle, addr);
    if (!pte_present(*pgtable)) {
        /* page is swapped out or not yet faulted in: bring it back */
        fault = handle_mm_fault(tsk, vma, addr, 0);
        if (fault > 0)
            goto repeat;
        if (fault < 0)
            force_sig(SIGKILL, tsk);
        return 0;
    }
    page = pte_page(*pgtable);
    /* this is a hack for non-kernel-mapped video buffers and similar */
    if (MAP_NR(page) >= max_mapnr)
        return 0;
    page += addr & ~PAGE_MASK;      /* byte offset within the page */
    return *(unsigned long *) page;
}
unsigned int pmem_user_v2p_video(unsigned int va) { unsigned int pageOffset = (va & (PAGE_SIZE - 1)); pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; unsigned int pa; if(NULL==current) { MFV_LOGE("[ERROR] pmem_user_v2p_video, current is NULL! \n"); return 0; } if(NULL==current->mm) { MFV_LOGE("[ERROR] pmem_user_v2p_video, current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm); return 0; } pgd = pgd_offset(current->mm, va); /* what is tsk->mm */ if(pgd_none(*pgd)||pgd_bad(*pgd)) { MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pgd invalid! \n", va); return 0; } pud = pud_offset(pgd, va); if(pud_none(*pud)||pud_bad(*pud)) { MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pud invalid! \n", va); return 0; } pmd = pmd_offset(pud, va); if(pmd_none(*pmd)||pmd_bad(*pmd)) { MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pmd invalid! \n", va); return 0; } pte = pte_offset_map(pmd, va); if(pte_present(*pte)) { pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset; pte_unmap(pte); return pa; } MFV_LOGE("[ERROR] pmem_user_v2p(), va=0x%x, pte invalid! \n", va); return 0; }
/*
 * Populate the pud level for [addr, end) under *pgd, mapping it to the
 * physical range starting at phys with protection prot.  The pud table
 * itself is allocated on demand via pgtable_alloc(); 1GB block mappings
 * are installed where use_1G_block()/block_mappings_allowed() permit.
 */
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot, phys_addr_t (*pgtable_alloc)(void))
{
    pud_t *pud;
    unsigned long next;

    if (pgd_none(*pgd)) {
        /* no pud table yet: allocate one and hook it into the pgd */
        phys_addr_t pud_phys;
        BUG_ON(!pgtable_alloc);
        pud_phys = pgtable_alloc();
        __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
    }
    BUG_ON(pgd_bad(*pgd));

    /* temporary fixmap window onto the pud table */
    pud = pud_set_fixmap_offset(pgd, addr);
    do {
        next = pud_addr_end(addr, end);

        /*
         * For 4K granule only, attempt to put down a 1GB block
         */
        if (use_1G_block(addr, next, phys) && block_mappings_allowed(pgtable_alloc)) {
            pud_t old_pud = *pud;
            pud_set_huge(pud, phys, prot);

            /*
             * If we have an old value for a pud, it will
             * be pointing to a pmd table that we no longer
             * need (from swapper_pg_dir).
             *
             * Look up the old pmd table and free it.
             */
            if (!pud_none(old_pud)) {
                flush_tlb_all();
                if (pud_table(old_pud)) {
                    phys_addr_t table = pud_page_paddr(old_pud);
                    if (!WARN_ON_ONCE(slab_is_available()))
                        memblock_free(table, PAGE_SIZE);
                }
            }
        } else {
            alloc_init_pmd(pud, addr, next, phys, prot, pgtable_alloc);
        }

        phys += next - addr;    /* keep phys in lockstep with addr */
    } while (pud++, addr = next, addr != end);

    pud_clear_fixmap();
}