Example #1
int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
	return 0;
#endif
}
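For readers unfamiliar with the arm64 descriptor encoding, the check above treats any non-empty entry whose table bit is clear as a block (huge) mapping. The following standalone sketch is purely illustrative and not part of the original example; the descriptor values are invented, with bit 1 standing in for PUD_TABLE_BIT as on arm64:

#include <stdio.h>

#define PUD_TABLE_BIT (1UL << 1)	/* arm64: bits[1:0] = 01 block, 11 table */

static int is_huge(unsigned long desc)
{
	return desc && !(desc & PUD_TABLE_BIT);
}

int main(void)
{
	printf("%d\n", is_huge(0x0000000080000701UL));	/* block descriptor -> 1 */
	printf("%d\n", is_huge(0x0000000080000003UL));	/* table descriptor -> 0 */
	printf("%d\n", is_huge(0x0UL));			/* empty entry      -> 0 */
	return 0;
}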
Example #2
static int relocate_restore_code(void)
{
	pgd_t *pgd;
	pud_t *pud;

	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
	pud = pud_offset(pgd, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
	} else {
		pmd_t *pmd = pmd_offset(pud, relocated_restore_code);

		if (pmd_large(*pmd)) {
			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		} else {
			pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);

			set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
		}
	}
	__flush_tlb_all();

	return 0;
}
Example #3
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			pr_warning("Failed to allocate identity pmd.\n");
			return;
		}
		/*
		 * Copy the original PMD to ensure that the PMD entries for
		 * the kernel image are preserved.
		 */
		if (!pud_none(*pud))
			memcpy(pmd, pmd_offset(pud, 0),
			       PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
Example #4
static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
		    unsigned long end)
{
	pud_t *dst_pudp;
	pud_t *src_pudp;
	unsigned long next;
	unsigned long addr = start;

	if (pgd_none(READ_ONCE(*dst_pgdp))) {
		dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pudp)
			return -ENOMEM;
		pgd_populate(&init_mm, dst_pgdp, dst_pudp);
	}
	dst_pudp = pud_offset(dst_pgdp, start);

	src_pudp = pud_offset(src_pgdp, start);
	do {
		pud_t pud = READ_ONCE(*src_pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			continue;
		if (pud_table(pud)) {
			if (copy_pmd(dst_pudp, src_pudp, addr, next))
				return -ENOMEM;
		} else {
			set_pud(dst_pudp,
				__pud(pud_val(pud) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pudp++, src_pudp++, addr = next, addr != end);

	return 0;
}
Example #5
static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{ 
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(addr, addr + PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			phys_pmd_update(pud, addr, end);
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(pmd);
	}
	__flush_tlb();
} 
Example #6
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
#ifdef CONFIG_ARM64
		if (pmd_none(*pmd) || pmd_sect(*pmd)) {
#else
		if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd)) {
#endif
			note_page(st, addr, 3, pmd_val(*pmd));
		} else {
			walk_pte(st, pmd, addr);
		}
#ifdef CONFIG_ARM
		if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1]))
			note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1]));
#endif
	}
}

static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
	pud_t *pud = pud_offset(pgd, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		addr = start + i * PUD_SIZE;
#if defined(CONFIG_ARM64) && !defined(CONFIG_ANDROID)
		if (pud_none(*pud) || pud_sect(*pud)) {
			note_page(st, addr, 2, pud_val(*pud));
		} else {
			walk_pmd(st, pud, addr);
		}
#else
		if (!pud_none(*pud)) {
			walk_pmd(st, pud, addr);
		} else {
			note_page(st, addr, 2, pud_val(*pud));
		}
#endif
	}
}
Example #7
unsigned long virtaddr_to_physaddr(struct mm_struct *mm, unsigned long vaddr)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    unsigned long paddr = 0;

    pgd = pgd_offset(mm, vaddr);
    printk("pgd_val = 0x%lx\n", pgd_val(*pgd));
    printk("pgd_index = %lu\n", pgd_index(vaddr));
    if (pgd_none(*pgd)) {
        printk("not mapped in pgd\n");
        return INVALID_ADDR;
    }

    pud = pud_offset(pgd, vaddr);
    printk("pud_val = 0x%lx\n", pud_val(*pud));
    printk("pud_index = %lu\n", pud_index(vaddr));
    if (pud_none(*pud)) {
        printk("not mapped in pud\n");
        return INVALID_ADDR;
    }

    pmd = pmd_offset(pud, vaddr);
    printk("pmd_val = 0x%lx\n", pmd_val(*pmd));
    printk("pmd_index = %lx\n", pmd_index(vaddr));
    if (pmd_none(*pmd)) {
        printk("not mapped in pmd\n");
        return INVALID_ADDR;
    }
    /* If pmd_large() is true, this pmd maps a huge page and is the last level of the walk */
    if (pmd_large(*pmd)) {
        paddr = (pmd_val(*pmd) & PAGE_MASK);
        paddr = paddr | (vaddr & ~PAGE_MASK);
        return paddr;
    }
    /*
     * Walk the fourth-level page table.
     * PAGE_MASK (0xfffffffffffff000) can be used to mask off the low [11:0] offset bits.
     */
    else {
        /* XXX: Need to implement */
        pte = pte_offset_kernel(pmd, vaddr);
        printk("pte_val = 0x%lx\n", pte_val(*pte));
        printk("pte_index = %lx\n", pte_index(vaddr));
        if (pte_none(*pte)) {
            printk("not mapped in pte\n");
            return INVALID_ADDR;
        }
        paddr = (pte_val(*pte) & PAGE_MASK);
        paddr = paddr | (vaddr & ~PAGE_MASK);
        printk("paddr = %lx\n", paddr);
        printk("__pa = %lx\n", __pa(vaddr));   /* magic macro in the kernel */
        /* End of implementation */
        return paddr;
    }

}
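The final composition step in the example above is plain bit arithmetic: the frame bits come from the pte and the in-page offset comes from the virtual address. A minimal standalone sketch with invented values, assuming the 4 KiB PAGE_MASK quoted in the comment:

#include <stdio.h>

#define PAGE_MASK 0xfffffffffffff000UL	/* 4 KiB pages */

int main(void)
{
	unsigned long pte   = 0x000000012345a067UL;	/* invented pte: frame 0x12345a000 plus flag bits */
	unsigned long vaddr = 0x00007f00deadb4acUL;	/* invented vaddr: in-page offset 0x4ac */
	unsigned long paddr = (pte & PAGE_MASK) | (vaddr & ~PAGE_MASK);

	printf("paddr = 0x%lx\n", paddr);	/* prints 0x12345a4ac */
	return 0;
}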
Example #8
void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | prot));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}
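The prot recovery above works because the output-address bits stored in the descriptor are exactly pud_pfn() << PAGE_SHIFT, so XORing them out leaves only the attribute bits, which are then reapplied to each pmd-sized piece. A standalone sketch with invented values (the simplified pfn extraction below ignores the high attribute bits a real pud_pfn() would mask off):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SIZE   (1UL << 21)	/* assuming 4 KiB pages, 2 MiB pmd entries */

int main(void)
{
	unsigned long pud_desc = 0x0000000080000711UL;	/* invented block descriptor: addr 0x80000000, attrs 0x711 */
	unsigned long addr = (pud_desc >> PAGE_SHIFT) << PAGE_SHIFT;	/* stands in for pud_pfn() << PAGE_SHIFT */
	unsigned long prot = pud_desc ^ addr;		/* XOR strips the address bits, leaving the attributes */
	int i;

	for (i = 0; i < 4; i++)	/* first few of the PTRS_PER_PMD entries split_pud() would write */
		printf("pmd[%d] = 0x%lx\n", i, (addr + i * PMD_SIZE) | prot);
	return 0;
}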
Example #9
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	/* There's an interaction between the skas0 stub pages, stack
	 * randomization, and the BUG at the end of exit_mmap.  exit_mmap
	 * checks that the number of page tables freed is the same as had
	 * been allocated.  If the stack is on the last page table page,
	 * then the stack pte page will be freed, and if not, it won't.  To
	 * avoid having to know where the stack is, or if the process mapped
	 * something at the top of its address space for some other reason,
	 * we set TASK_SIZE to end at the start of the last page table.
	 * This keeps exit_mmap off the last page, but introduces a leak
	 * of that page.  So, we hang onto it here and free it in
	 * destroy_context_skas.
	 */

	mm->context.skas.last_page_table = pmd_page_kernel(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
	mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif

	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkexec(*pte);
	*pte = pte_wrprotect(*pte);
	return(0);

 out_pmd:
	pud_free(pud);
 out_pte:
	pmd_free(pmd);
 out:
	return(-ENOMEM);
}
Example #11
/**
 * flush_icache_page_range - Flush dcache and invalidate icache for part of a
 *				single page
 * @start: The starting virtual address of the page part.
 * @end: The ending virtual address of the page part.
 *
 * Flush the dcache and invalidate the icache for part of a single page, as
 * determined by the virtual addresses given.  The page must be in the paged
 * area.
 */
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	/* work out how much of the page to flush */
	off = start & ~PAGE_MASK;
	size = end - start;

	/* get the physical address the page is mapped to from the page
	 * tables */
	pgd = pgd_offset(current->mm, start);
	if (!pgd || !pgd_val(*pgd))
		return;

	pud = pud_offset(pgd, start);
	if (!pud || !pud_val(*pud))
		return;

	pmd = pmd_offset(pud, start);
	if (!pmd || !pmd_val(*pmd))
		return;

	ppte = pte_offset_map(pmd, start);
	if (!ppte)
		return;
	pte = *ppte;
	pte_unmap(ppte);

	if (pte_none(pte))
		return;

	page = pte_page(pte);
	if (!page)
		return;

	addr = page_to_phys(page);

	/* flush the dcache and invalidate the icache coverage on that
	 * region */
	mn10300_local_dcache_flush_range2(addr + off, size);
	mn10300_local_icache_inv_range2(addr + off, size);
	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
}
Example #12
/*
 * The VSYSCALL page is the only user-accessible page in the kernel address
 * range.  Normally, the kernel page tables can have _PAGE_USER clear, but
 * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
 * are enabled.
 *
 * Some day we may create a "minimal" vsyscall mode in which we emulate
 * vsyscalls but leave the page not present.  If so, we skip calling
 * this.
 */
void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
#if CONFIG_PGTABLE_LEVELS >= 5
	set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
#endif
	pud = pud_offset(p4d, VSYSCALL_ADDR);
	set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
	pmd = pmd_offset(pud, VSYSCALL_ADDR);
	set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
}
Example #13
int oleole_get_gPTE_offset_with_alloc(struct mm_struct *mm, pte_t **result, unsigned long address)
{
	pgd_t *pgd, pgd_v;
	pud_t *pud, pud_v;
	pmd_t *pmd, pmd_v;
	pte_t *pte;

	pgd   = pgd_offset(mm, address);
	pgd_v = *pgd;
	if (pgd_none(pgd_v))
		if (pud_alloc(mm, pgd, address) == NULL)
			return -ENOMEM;

	pud   = pud_offset(pgd, address);
	pud_v = *pud;

	if (oleole_pud_none(pud_v))
		if (oleole_pmd_alloc(pud))
			return -ENOMEM;

	if (unlikely((pud_val(pud_v) & _PAGE_DEACTIVATED)))
		reactivate_pmd_table(pud);

	pmd   = pmd_offset(pud, address);
	pmd_v = *pmd;

	if (oleole_pmd_none(pmd_v))
		if (oleole_pte_alloc(pmd))
			return -ENOMEM;

	if (unlikely((pmd_val(pmd_v) & _PAGE_DEACTIVATED)))
		reactivate_pte_table(pmd);

	pte  = pte_offset_map(pmd, address);

	*result = pte;

	return 0;
}
Example #14
static void reactivate_pmd_table(pud_t *pud)
{
	int i;
	pmd_t *pmd;
	unsigned long val;

	val  = pud_val(*pud);

	if (likely(val & ~(_PAGE_DEACTIVATED|_PAGE_PRESENT)))
		val &= ~_PAGE_DEACTIVATED;
	else
		val &= ~(_PAGE_DEACTIVATED|_PAGE_PRESENT);

	*pud = __pud(val);

	pmd = pmd_offset(pud, 0);
	for (i=0 ; i<PTRS_PER_PMD ; i++, pmd++) {
		val  = pmd_val(*pmd);
		val |= (_PAGE_DEACTIVATED|_PAGE_PRESENT);
		*pmd = __pmd(val);
	}
}
Example #15
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(NULL, addr);
		if (!pmd) {
			pr_warning("Failed to allocate identity pmd.\n");
			return;
		}
		pud_populate(NULL, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
Example #16
static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
							unsigned long P)
{
	int i;
	pud_t *start;

	start = (pud_t *) pgd_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_PUD; i++) {
		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
		if (!pud_none(*start)) {
			pgprotval_t prot = pud_val(*start) & PTE_FLAGS_MASK;

			if (pud_large(*start) || !pud_present(*start))
				note_page(m, st, __pgprot(prot), 2);
			else
				walk_pmd_level(m, st, *start,
					       P + i * PUD_LEVEL_MULT);
		} else
			note_page(m, st, __pgprot(0), 2);

		start++;
	}
}
Example #17
/*
 * Dump out the page tables associated with 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd) || pgd_bad(*pgd))
			break;

		pud = pud_offset(pgd, addr);
		printk(", *pud=%016llx", pud_val(*pud));
		if (pud_none(*pud) || pud_bad(*pud))
			break;

		pmd = pmd_offset(pud, addr);
		printk(", *pmd=%016llx", pmd_val(*pmd));
		if (pmd_none(*pmd) || pmd_bad(*pmd))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%016llx", pte_val(*pte));
		pte_unmap(pte);
	} while(0);

	printk("\n");
}
Example #18
static int find_pgd_init(void) 
{ 
        unsigned long pa = 0; 
        struct task_struct *pcb_tmp = NULL; 
        pgd_t *pgd_tmp = NULL; 
        pud_t *pud_tmp = NULL; 
        pmd_t *pmd_tmp = NULL; 
        pte_t *pte_tmp = NULL; 
  
        printk(KERN_INFO"PAGE_OFFSET = 0x%lx\n",PAGE_OFFSET); 
        printk(KERN_INFO"PGDIR_SHIFT = %d\n",PGDIR_SHIFT); 
        printk(KERN_INFO"PUD_SHIFT = %d\n",PUD_SHIFT); 
        printk(KERN_INFO"PMD_SHIFT = %d\n",PMD_SHIFT); 
        printk(KERN_INFO"PAGE_SHIFT = %d\n",PAGE_SHIFT); 
  
        printk(KERN_INFO"PTRS_PER_PGD = %d\n",PTRS_PER_PGD); 
        printk(KERN_INFO"PTRS_PER_PUD = %d\n",PTRS_PER_PUD); 
        printk(KERN_INFO"PTRS_PER_PMD = %d\n",PTRS_PER_PMD); 
        printk(KERN_INFO"PTRS_PER_PTE = %d\n",PTRS_PER_PTE); 
  
        printk(KERN_INFO"PAGE_MASK = 0x%lx\n",PAGE_MASK); 
  
        if(!(pcb_tmp = find_task_by_vpid(pid))) { 
                printk(KERN_INFO"Can't find the task %d .\n",pid); 
                return 0; 
        } 
        printk(KERN_INFO"pgd = 0x%p\n",pcb_tmp->mm->pgd); 
        /* Check whether the given address va is valid (va < vm_end) */ 
        if(!find_vma(pcb_tmp->mm,va)){ 
                printk(KERN_INFO"virt_addr 0x%lx not available.\n",va); 
                return 0; 
        } 
        pgd_tmp = pgd_offset(pcb_tmp->mm,va); 
        printk(KERN_INFO"pgd_tmp = 0x%p\n",pgd_tmp); 
        printk(KERN_INFO"pgd_val(*pgd_tmp) = 0x%lx\n",pgd_val(*pgd_tmp)); 
        if(pgd_none(*pgd_tmp)){ 
                printk(KERN_INFO"Not mapped in pgd.\n");         
                return 0; 
        } 
        pud_tmp = pud_offset(pgd_tmp,va); 
        printk(KERN_INFO"pud_tmp = 0x%p\n",pud_tmp); 
        printk(KERN_INFO"pud_val(*pud_tmp) = 0x%lx\n",pud_val(*pud_tmp)); 
        if(pud_none(*pud_tmp)){ 
                printk(KERN_INFO"Not mapped in pud.\n"); 
                return 0; 
        } 
        pmd_tmp = pmd_offset(pud_tmp,va); 
        printk(KERN_INFO"pmd_tmp = 0x%p\n",pmd_tmp); 
        printk(KERN_INFO"pmd_val(*pmd_tmp) = 0x%lx\n",pmd_val(*pmd_tmp)); 
        if(pmd_none(*pmd_tmp)){ 
                printk(KERN_INFO"Not mapped in pmd.\n"); 
                return 0; 
        } 
        /* Here the original pte_offset_map() has been replaced with pte_offset_kernel() */ 
        pte_tmp = pte_offset_kernel(pmd_tmp,va); 
  
        printk(KERN_INFO"pte_tmp = 0x%p\n",pte_tmp); 
        printk(KERN_INFO"pte_val(*pte_tmp) = 0x%lx\n",pte_val(*pte_tmp)); 
        if(pte_none(*pte_tmp)){ 
                printk(KERN_INFO"Not mapped in pte.\n"); 
                return 0; 
        } 
        if(!pte_present(*pte_tmp)){ 
                printk(KERN_INFO"pte not in RAM.\n"); 
                return 0; 
        } 
        pa = (pte_val(*pte_tmp) & PAGE_MASK) | (va & ~PAGE_MASK); 
        printk(KERN_INFO"virt_addr 0x%lx in RAM is 0x%lx .\n",va,pa); 
        printk(KERN_INFO"content at 0x%lx is 0x%lx\n",pa, 
                *(unsigned long *)((char *)pa + PAGE_OFFSET)); 
                                                         
        return 0; 
  
} 
Example #19
inline pmd_t* nvmm_get_pmd(pud_t *pud)
{
    return (pmd_t*)__va(pud_val(*pud) & PAGE_MASK);
}
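A hypothetical follow-on helper (not part of the original example) showing how such an accessor is typically combined with the standard pmd_index() macro to reach the pmd entry that covers a given virtual address:

/* Hypothetical: descend from a populated pud entry to the pmd entry for vaddr. */
static inline pmd_t *nvmm_get_pmd_entry(pud_t *pud, unsigned long vaddr)
{
    return nvmm_get_pmd(pud) + pmd_index(vaddr);
}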
Example #20
static void kvm_set_pud(pud_t *pudp, pud_t pud)
{
	kvm_mmu_write(pudp, pud_val(pud));
}
int pud_huge(pud_t pud)
{
	return (pud_val(pud) & _PAGE_HUGE) != 0;
}
Example #22
static inline pmd_t* nvmm_get_pud_entry(pud_t *const pud)
{
	return (pmd_t*)__va(pud_val(*pud) & PAGE_MASK);
}
Example #23
int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}
Example #24
static ssize_t showmmap_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) 
{
  char lbuf[32];
  int pagecount, addr, pid;
  char *ptxt, *porg;
  struct mm_struct *mm;
  struct task_struct *tsk;
  pgd_t *pgd;
  pud_t *pud;
  pmd_t *pmd;
  pte_t *pte;

  if (count > 30)
    return -EINVAL;
  if (copy_from_user(lbuf, buf, count))
    return -EINVAL;

  lbuf[count] = 0;
  sscanf(lbuf, "%d %x %d", &pid, &addr, &pagecount);
  if (pagecount > 1024)
    return -EINVAL;

  //align addr to a 4 KiB boundary
  addr = addr & ~(PAGE_SIZE - 1);

  //prepare output memory buffer
  if (showmmap_txt_buf != NULL)
    kfree(showmmap_txt_buf);
  showmmap_txt_size = pagecount * OUTPUT_BYTES_PER_PAGE;
  showmmap_txt_buf = kmalloc(showmmap_txt_size, GFP_USER);
  if (showmmap_txt_buf == NULL)
    return -EINVAL;
  ptxt = showmmap_txt_buf;
  porg = ptxt;

  //first, get mm_struct by pid
  rcu_read_lock();
  tsk = pid_task(find_get_pid(pid), PIDTYPE_PID);
  if (!tsk) {
    ptxt += sprintf(ptxt, "can't find process %d\n", pid);
    goto fail;
  }

  mm = tsk->mm;

  while(pagecount > 0) {
    pagecount--;
    pgd = pgd_offset(mm, addr);
    if (!pgd || !pgd_present(*pgd)) {
	ptxt += sprintf(ptxt, "[%08x]:not mapped pgd\n", addr);
	continue;
    }
    pud = pud_offset(pgd, addr);
    if (!pud || !pud_present(*pud)) {
	ptxt += sprintf(ptxt, "[%08x]:not mapped pud(pgd %08x)\n", addr, pgd_val(*pgd));
	continue;
    }
    pmd = pmd_offset(pud, addr);
    if (!pmd || !pmd_present(*pmd)) {
	ptxt += sprintf(ptxt, "[%08x]:not mapped pmd(pgd %08x pud %08x)\n", addr, pgd_val(*pgd), pud_val(*pud));
	continue;
    }
    pte = pte_offset_map(pmd, addr);

    if (!pte || !pte_present(*pte))
      ptxt += sprintf(ptxt, "[%08x]:not mapped(pgd %08x pud %08x pmd %08x)\n", addr, pgd_val(*pgd), pud_val(*pud), pmd_val(*pmd));
    else
      ptxt += sprintf(ptxt, "[%08x]:phy %08x %d\n", addr, pte_val(*pte), pte_pfn(*pte));

    addr += PAGE_SIZE;
  }

 fail:
  rcu_read_unlock();

  showmmap_txt_size = ptxt - porg;
  showmmap_txt_start = 0;

  return count;
}