Example #1
int kern_addr_valid(unsigned long addr)
{
	/* Reject non-canonical addresses: every bit above the virtual
	 * mask must be a sign extension (all zeros or all ones). */
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0; 

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}
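The opening check is what distinguishes this walker: on x86-64 an address is canonical only if every bit above the virtual mask is a copy of the sign bit. A minimal user-space sketch of that same test, assuming the 47-bit split of 4-level paging (the constant below is a stand-in, not the kernel's __VIRTUAL_MASK_SHIFT):

#include <stdio.h>

/* Stand-in for __VIRTUAL_MASK_SHIFT on 4-level x86-64 (an assumption for
 * illustration only). Like the kernel, this relies on arithmetic right
 * shift of a signed value. */
#define VIRTUAL_MASK_SHIFT 47

static int is_canonical(unsigned long addr)
{
	unsigned long above = ((long)addr) >> VIRTUAL_MASK_SHIFT;
	return above == 0 || above == -1UL;
}

int main(void)
{
	printf("%d\n", is_canonical(0x00007fffffffffffUL)); /* 1: top of user space */
	printf("%d\n", is_canonical(0xffff800000000000UL)); /* 1: base of kernel space */
	printf("%d\n", is_canonical(0x0000800000000000UL)); /* 0: non-canonical hole */
	return 0;
}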
Example #2
int syscall_hooking_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((sys_call_table = locate_sys_call_table()) == NULL) {
		printk("<0> Can't find sys_call_table\n");
		return -1;
	}

	pgd = pgd_offset_k((unsigned long)sys_call_table);
	if (pgd_none(*pgd))
		return -1;
	pud = pud_offset(pgd, (unsigned long)sys_call_table);
	if (pud_none(*pud))
		return -1;
	pmd = pmd_offset(pud, (unsigned long)sys_call_table);
	if (pmd_none(*pmd))
		return -1;
	if (pmd_large(*pmd)) {
		/* large page: the pmd entry itself is the leaf */
		pte = (pte_t *)pmd;
	} else {
		pte = pte_offset_kernel(pmd, (unsigned long)sys_call_table);
	}
	/* force kernel read-write protections onto the page holding the table */
	pte->pte_low |= _PAGE_KERNEL;
	__flush_tlb_single((unsigned long)sys_call_table);
	printk("<0> sys_call_table is loaded at %p\n", sys_call_table);

	original_call = (void *)sys_call_table[__NR_open];
	sys_call_table[__NR_open] = (void *)sys_our_open;
	
	printk("<0> Module Init\n");
	return 0;
}
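The listing uses original_call and sys_our_open without showing them. A hedged sketch of those companion pieces, assuming the old (pre-4.17) three-argument sys_open ABI that this style of table patching targets; the cleanup function is a hypothetical addition:

asmlinkage long (*original_call)(const char __user *, int, umode_t);

asmlinkage long sys_our_open(const char __user *filename, int flags,
			     umode_t mode)
{
	printk("<0> open() intercepted\n");
	return original_call(filename, flags, mode);	/* forward to the real open */
}

/* Hypothetical cleanup: restore the table before the module unloads. */
void syscall_hooking_exit(void)
{
	sys_call_table[__NR_open] = (void *)original_call;
}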
Example #3
static int relocate_restore_code(void)
{
	pgd_t *pgd;
	pud_t *pud;

	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
	pud = pud_offset(pgd, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
	} else {
		pmd_t *pmd = pmd_offset(pud, relocated_restore_code);

		if (pmd_large(*pmd)) {
			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		} else {
			pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);

			set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
		}
	}
	__flush_tlb_all();

	return 0;
}
Example #4
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid, false);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}
Example #5
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = ACCESS_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		/*
		 * If we find a splitting transparent hugepage we
		 * return zero. That will result in taking the slow
		 * path which will call wait_split_huge_page()
		 * if the pmd is still in splitting state
		 */
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;
		if (pmd_huge(pmd) || pmd_large(pmd)) {
			if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
					 write, pages, nr))
				return 0;
		} else if (is_hugepd(pmdp)) {
			if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
					addr, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
Example #6
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
#ifdef CONFIG_ARM64
		if (pmd_none(*pmd) || pmd_sect(*pmd)) {
#else
		if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd)) {
#endif
			note_page(st, addr, 3, pmd_val(*pmd));
		} else {
			walk_pte(st, pmd, addr);
		}
#ifdef CONFIG_ARM
		if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1]))
			note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1]));
#endif
	}
}

static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
	pud_t *pud = pud_offset(pgd, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		addr = start + i * PUD_SIZE;
#if defined(CONFIG_ARM64) && !defined(CONFIG_ANDROID)
		if (pud_none(*pud) || pud_sect(*pud)) {
			note_page(st, addr, 2, pud_val(*pud));
		} else {
			walk_pmd(st, pud, addr);
		}
#else
		if (!pud_none(*pud)) {
			walk_pmd(st, pud, addr);
		} else {
			note_page(st, addr, 2, pud_val(*pud));
		}
#endif
	}
}
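No top-level driver appears in the listing; a minimal sketch of one, assuming the same note_page() convention (level 1 for the pgd, matching the 2-for-pud and 3-for-pmd used above); walk_pgd is a hypothetical name:

static void walk_pgd(struct pg_state *st, struct mm_struct *mm)
{
	pgd_t *pgd = pgd_offset(mm, 0UL);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
		addr = i * PGDIR_SIZE;
		if (pgd_none(*pgd))
			note_page(st, addr, 1, pgd_val(*pgd));	/* hole at pgd level */
		else
			walk_pud(st, pgd, addr);
	}
}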
Example #7
unsigned long virtaddr_to_physaddr(struct mm_struct *mm, unsigned long vaddr)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    unsigned long paddr = 0;

    pgd = pgd_offset(mm, vaddr);
    printk("pgd_val = 0x%lx\n", pgd_val(*pgd));
    printk("pgd_index = %lu\n", pgd_index(vaddr));
    if (pgd_none(*pgd)) {
        printk("not mapped in pgd\n");
        return INVALID_ADDR;
    }

    pud = pud_offset(pgd, vaddr);
    printk("pud_val = 0x%lx\n", pud_val(*pud));
    printk("pud_index = %lu\n", pud_index(vaddr));
    if (pud_none(*pud)) {
        printk("not mapped in pud\n");
        return INVALID_ADDR;
    }

    pmd = pmd_offset(pud, vaddr);
    printk("pmd_val = 0x%lx\n", pmd_val(*pmd));
    printk("pmd_index = %lx\n", pmd_index(vaddr));
    if(pmd_none(*pmd)){
        printk("not mapped in pmd\n");
        return INVALID_ADDR;
    }
    /* If pmd_large() is true, the pmd is the last level: it maps a 2 MiB
     * page, so the low PMD_SHIFT bits of vaddr are the page offset. */
    if (pmd_large(*pmd)) {
        paddr = pmd_val(*pmd) & PMD_MASK;
        paddr |= vaddr & ~PMD_MASK;
        return paddr;
    }

    /* Otherwise walk the fourth (last) level; PAGE_MASK
     * (0xfffffffffffff000) strips the 12 in-page offset bits. */
    pte = pte_offset_kernel(pmd, vaddr);
    printk("pte_val = 0x%lx\n", pte_val(*pte));
    printk("pte_index = %lu\n", pte_index(vaddr));
    if (pte_none(*pte)) {
        printk("not mapped in pte\n");
        return INVALID_ADDR;
    }
    paddr = pte_val(*pte) & PAGE_MASK;
    paddr |= vaddr & ~PAGE_MASK;
    printk("paddr = %lx\n", paddr);
    printk("__pa = %lx\n", __pa(vaddr));   /* kernel helper, valid for direct-map addresses */
    return paddr;
}
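A hedged example of driving the translator from a module. v2p_demo_init is a made-up name, &jiffies is just a convenient kernel virtual address, and init_mm must be reachable (newer kernels do not export it to modules):

static int __init v2p_demo_init(void)
{
	unsigned long vaddr = (unsigned long)&jiffies;
	unsigned long paddr = virtaddr_to_physaddr(&init_mm, vaddr);

	if (paddr != INVALID_ADDR)
		printk("0x%lx -> 0x%lx\n", vaddr, paddr);
	return 0;
}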
Example #8
/*
 * set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */

	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_large(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
Example #9
pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;	/* huge page: the pmd entry is the leaf */
	return pte_offset_kernel(pmd, address);
}
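The usual reason this helper exists is to flip protection bits on whatever entry comes back. A sketch under the same assumptions as the older x86-64 kernels these examples target (the pte.pte field and __flush_tlb_single()); set_addr_rw is a hypothetical name:

/* Make a read-only kernel page writable. If lookup_address() returned a
 * large pmd, the write below changes the whole 2 MiB mapping at once. */
static void set_addr_rw(unsigned long addr)
{
	pte_t *pte = lookup_address(addr);

	if (pte && !(pte_val(*pte) & _PAGE_RW)) {
		pte->pte |= _PAGE_RW;
		__flush_tlb_single(addr);	/* drop the stale read-only TLB entry */
	}
}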
Example #10
unsigned long l4x_set_pmd(struct mm_struct *mm,
                          unsigned long addr,
                          pmd_t old, pmd_t pmdval)
{
	/*
	 * Check if any invalidation is necessary
	 *
	 * Invalidation (flush) necessary if:
	 *   old page was present
	 *       new page is not present OR
	 *       new page has another physical address OR
	 *       new page has another protection OR
	 *       new page has other access attributes
	 */

	/* old was present && new not -> flush */
	int flush_rights = L4_FPAGE_RWX;
	BUG_ON(!pmd_large(old));

	if (pmd_present(pmdval)) {
		/* new page is present,
		 * now we have to find out what has changed */
		if (((pmd_val(old) ^ pmd_val(pmdval)) & PMD_PAGE_MASK & PHYSICAL_PAGE_MASK)
		    || (pmd_young(old) && !pmd_young(pmdval))) {
			/* physical page frame changed
			 * || access attribute changed -> flush */
			/* flush is the default */
		} else if ((pmd_write(old) && !pmd_write(pmdval))
		           || (pmd_flags(old) & ~pmd_flags(pmdval)
		               & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY))) {
			/* Protection changed from r/w to ro
			 * or page now clean -> remap */
			flush_rights = L4_FPAGE_W;
		} else {
			/* nothing changed, simply return */
			return pmd_val(pmdval);
		}
	}

	/* Ok, now actually flush or remap the page */
	l4x_flush_page(mm, pmd_val(old) & PMD_PAGE_MASK & PHYSICAL_PAGE_MASK,
	               addr, PMD_SHIFT, flush_rights, _RET_IP_);
	return pmd_val(pmdval);
}
Example #11
//walk_page_table modified
static pte_t *walk_page_table(unsigned long addr)
{
   pgd_t *pgdp;
   pud_t *pudp;
   pmd_t *pmdp;
   pte_t *ptep;
   pgdp = pgd_offset_k(addr);
   if (pgd_none(*pgdp))
      return NULL;
   pudp = pud_offset(pgdp, addr);
   if (pud_none(*pudp) || pud_large(*pudp))	/* no single pte behind a huge pud */
      return NULL;
   pmdp = pmd_offset(pudp, addr);
   if (pmd_none(*pmdp) || pmd_large(*pmdp))	/* likewise for a huge pmd */
      return NULL;
   ptep = pte_offset_kernel(pmdp, addr);
   if (pte_none(*ptep))
      return NULL;
   return ptep;
}
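A hypothetical caller, to show what the NULL contract buys: because the walker refuses huge pud and pmd mappings, a non-NULL result is always a genuine 4 KiB pte (dump_pte_flags is an assumed name):

static void dump_pte_flags(unsigned long addr)
{
	pte_t *ptep = walk_page_table(addr);

	if (ptep)
		printk("pte for %lx: %lx\n", addr, (unsigned long)pte_val(*ptep));
	else
		printk("%lx: unmapped, or behind a huge mapping\n", addr);
}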
Example #12
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		/*
		 * If we find a splitting transparent hugepage we
		 * return zero. That will result in taking the slow
		 * path which will call wait_split_huge_page()
		 * if the pmd is still in splitting state
		 */
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;
		if (pmd_huge(pmd) || pmd_large(pmd)) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_numa(pmd))
				return 0;

			if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
					 write, pages, nr))
				return 0;
		} else if (is_hugepd(pmdp)) {
			if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
					addr, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
Example #13
static inline pte_t *tpe_lookup_address(unsigned long address, unsigned int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL;
	return pte;
}
Example #14
static pte_t *
pgtbl_lookup_address(paddr_t pgtbl, unsigned long addr)
{
	pgd_t *pgd = ((pgd_t *)chal_pa2va((void*)pgtbl)) + pgd_index(addr);
	pud_t *pud;
	pmd_t *pmd;
	if (pgd_none(*pgd)) {
		return NULL;
	}
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		return NULL;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		return NULL;
	}
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, addr);
}
Example #15
static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
							unsigned long P)
{
	int i;
	pmd_t *start;

	start = (pmd_t *) pud_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
		if (!pmd_none(*start)) {
			pgprotval_t prot = pmd_val(*start) & PTE_FLAGS_MASK;

			if (pmd_large(*start) || !pmd_present(*start))
				note_page(m, st, __pgprot(prot), 3);
			else
				walk_pte_level(m, st, *start,
					       P + i * PMD_LEVEL_MULT);
		} else
			note_page(m, st, __pgprot(0), 3);
		start++;
	}
}
Example #16
/*
 * map any virtual address of the current process to its
 * physical one.
 */
static unsigned long long any_v2p(unsigned long long vaddr)
{
	pgd_t *pgd = pgd_offset(current->mm, vaddr);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
	p4d_t *p4d;
#endif
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* to lock the page */
	struct page *pg;
	unsigned long long paddr = 0;	/* returned as-is from "out:" for absent entries */

	if (bad_address(pgd)) {
		printk(KERN_ALERT "[nskk] Alert: bad address of pgd %p\n", pgd);
		goto bad;
	}
	if (!pgd_present(*pgd)) {
		printk(KERN_ALERT "[nskk] Alert: pgd not present %lu\n", *pgd);
		goto out;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none(*p4d))
		return 0;
	pud = pud_offset(p4d, vaddr);
#else
	pud = pud_offset(pgd, vaddr);
#endif
	if (bad_address(pud)) {
		printk(KERN_ALERT "[nskk] Alert: bad address of pud %p\n", pud);
		goto bad;
	}
	if (!pud_present(*pud) || pud_large(*pud)) {
		printk(KERN_ALERT "[nskk] Alert: pud not present %lu\n", *pud);
		goto out;
	}

	pmd = pmd_offset(pud, vaddr);
	if (bad_address(pmd)) {
		printk(KERN_ALERT "[nskk] Alert: bad address of pmd %p\n", pmd);
		goto bad;
	}
	if (!pmd_present(*pmd) || pmd_large(*pmd)) {
		printk(KERN_ALERT "[nskk] Alert: pmd not present %lu\n", *pmd);
		goto out;
	}

	pte = pte_offset_kernel(pmd, vaddr);
	if (bad_address(pte)) {
		printk(KERN_ALERT "[nskk] Alert: bad address of pte %p\n", pte);
		goto bad;
	}
	if (!pte_present(*pte)) {
		printk(KERN_ALERT "[nskk] Alert: pte not present %lu\n", *pte);
		goto out;
	}

	pg = pte_page(*pte);
#if 1
	paddr = (pte_val(*pte) & PHYSICAL_PAGE_MASK) | (vaddr&(PAGE_SIZE-1));
#else
	pte->pte |= _PAGE_RW; // | _PAGE_USER;
	paddr = pte_val(*pte);
#endif

out:
	return paddr;
bad:
	printk(KERN_ALERT "[nskk] Alert: Bad address\n");
	return 0;
}