Example #1
File: mmu.c Project: 0xheart0/linux
static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
			kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
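Every range-walking example on this page uses the same idiom: pte_offset_kernel() returns a pointer to the entry covering the first address, and since the entries of one page table are contiguous in memory, the cursor and the address advance in lockstep (pte++, addr += PAGE_SIZE). A minimal sketch of that pattern, assuming a pmd already known to point at a PTE table; count_present_ptes is a hypothetical name, not taken from any of the projects shown here:

static int count_present_ptes(pmd_t *pmd, unsigned long addr,
			      unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);	/* PTE for 'addr' */
	int n = 0;

	do {
		if (pte_present(*pte))	/* entry maps a resident page */
			n++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	return n;
}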
Example #2
static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	if (kvm_pmd_huge(*pmd)) {
		pmd_clear(pmd);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	} else {
		pte_t *pte_table = pte_offset_kernel(pmd, 0);
		pmd_clear(pmd);
		kvm_tlb_flush_vmid_ipa(kvm, addr);
		pte_free_kernel(NULL, pte_table);
	}
	put_page(virt_to_page(pmd));
}
Example #3
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}
Example #4
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_offset_kernel(pmd, addr);

	return pte;
}
Example #5
int __init wip_init(void)
{
	unsigned long va = 0xb77e5000;
	int pid = 1072;
	unsigned long long pageFN;
	unsigned long long pa;

	pgd_t *pgd;
	pud_t *pud = NULL;
	pmd_t *pmd = NULL;
	pte_t *pte = NULL;

	struct mm_struct *mm = NULL;

	int found = 0;

	struct task_struct *task;
	for_each_process(task)
	{
		if(task->pid == pid)
			mm = task->mm;
	}
	if(!mm)
		return -ESRCH;	/* no such pid, or a kernel thread with no mm */
	pgd  = pgd_offset(mm,va);
	if(!pgd_none(*pgd) && !pgd_bad(*pgd))
	{
		pud = pud_offset(pgd,va);
		if(!pud_none(*pud) && !pud_bad(*pud))
		{
			pmd = pmd_offset(pud,va);
			if(!pmd_none(*pmd) && !pmd_bad(*pmd))
			{
				pte = pte_offset_kernel(pmd,va);
				if(!pte_none(*pte))
				{
					pageFN = pte_pfn(*pte);
					pa = ((pageFN<<12)|(va&0x00000FFF));
					found = 1;
					printk(KERN_ALERT "Physical Address: 0x%08llx\npfn: 0x%04llx\n", pa, pageFN);
				}
			}
		}
	}
	if(pte && pte_none(*pte))
	{
		/* Layout-specific: treat the upper half of the raw entry as
		 * the swap ID (the portable helper is pte_to_swp_entry()). */
		unsigned long long swapID = (pte_val(*pte) >> 32);
		found = 1;
		printk(KERN_ALERT "swap ID: 0x%08llx\n", swapID);
	}

	return 0;
}
Example #6
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
	if (!pgd_present(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;

	if (pud_huge(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return NULL;

	if (pte_cont(pmd_pte(*pmd))) {
		pmd = pmd_offset(
			pud, (addr & CONT_PMD_MASK));
		return (pte_t *)pmd;
	}
	if (pmd_huge(*pmd))
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, addr);
	if (pte_present(*pte) && pte_cont(*pte)) {
		pte = pte_offset_kernel(
			pmd, (addr & CONT_PTE_MASK));
		return pte;
	}
	return NULL;
}
Example #7
/**
 * @brief   Translate user virtual address to physical address.
 * @param   va [in] user virtual address.
 * @return  success: physical address; failure: 0
 * @see     CHUNK_MEM_FREE
 */
unsigned int gp_user_va_to_pa(void *va)
{
	pgd_t *pgd = NULL;
	pud_t *pud = NULL;
	pmd_t *pmd = NULL;
	pte_t *pte = NULL;
	struct mm_struct *mm = current->mm;
	unsigned int addr = (unsigned int)va;
	unsigned int pa = 0;

	down_read(&mm->mmap_sem);

	/* query page tables */
	if (!find_vma(mm, addr)) {
		DIAG_VERB("virt_addr %08X not available.\n", addr);
		goto out;
	}
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd)) {
		DIAG_VERB("Not mapped in pgd.\n");
		goto out;
	}
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		DIAG_VERB("Not mapped in pud.\n");
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		DIAG_VERB("Not mapped in pmd.\n");
		goto out;
	}
	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		DIAG_VERB("Not mapped in pte.\n");
		goto out;
	}
	if (!pte_present(*pte)) {
		DIAG_VERB("pte not in RAM.\n");
		goto out;
	}

	pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

out:
	up_read(&mm->mmap_sem);
	return pa;
}
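A hedged usage sketch for the helper above. The ioctl-style caller is hypothetical; only the contract comes from the doc comment (physical address on success, 0 on failure):

static int my_drv_prepare_dma(void *user_va)	/* hypothetical caller */
{
	unsigned int pa = gp_user_va_to_pa(user_va);

	if (!pa)	/* no VMA, not mapped, or not resident */
		return -EFAULT;

	/* ... hand 'pa' to the device ... */
	return 0;
}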
Example #8
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		kvm_flush_dcache_to_poc(pte, sizeof(*pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}
Example #9
static void flush_kernel_vm_range(unsigned long start, unsigned long end, 
				  int update_seq)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr;
	int updated = 0, err;

	mm = &init_mm;
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		pmd = pmd_offset(pgd, addr);
		if(pmd_present(*pmd)){
			pte = pte_offset_kernel(pmd, addr);
			if(!pte_present(*pte) || pte_newpage(*pte)){
				updated = 1;
				err = os_unmap_memory((void *) addr, 
						      PAGE_SIZE);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
				if(pte_present(*pte))
					map_memory(addr, 
						   pte_val(*pte) & PAGE_MASK,
						   PAGE_SIZE, 1, 1, 1);
			}
			else if(pte_newprot(*pte)){
				updated = 1;
				protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
			}
			addr += PAGE_SIZE;
		}
		else {
			if(pmd_newpage(*pmd)){
				updated = 1;
				err = os_unmap_memory((void *) addr, PMD_SIZE);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr += PMD_SIZE;
		}
	}
	if(updated && update_seq) atomic_inc(&vmchange_seq);
}
Example #10
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create stage-2 page table mapping - Levels 0 and 1 */
	pmd = stage2_get_pmd(kvm, cache, addr);
	if (!pmd) {
		/*
		 * Ignore calls from kvm_set_spte_hva for unallocated
		 * address ranges.
		 */
		return 0;
	}

	/* Create stage-2 page mappings - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	if (pte_present(old_pte)) {
		/* Skip page table update if there is no change */
		if (pte_val(old_pte) == pte_val(*new_pte))
			return 0;

		kvm_set_pte(pte, __pte(0));
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	} else {
		get_page(virt_to_page(pte));
	}

	kvm_set_pte(pte, *new_pte);
	return 0;
}
Example #11
static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
			      unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	pgprot_t prot;

	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n",  pa, va);

	if (!flags)
		flags = 1; /* 1 = CB0-1 device */

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
		pudp = (pud_t *)sh64_get_page();
		set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
	}

	pudp = pud_offset(pgdp, va);
	if (pud_none(*pudp) || !pud_present(*pudp)) {
		pmdp = (pmd_t *)sh64_get_page();
		set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pudp, va);
	if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
		ptep = (pte_t *)sh64_get_page();
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ     | _PAGE_WRITE  |
			_PAGE_DIRTY   | _PAGE_ACCESSED | _PAGE_SHARED | flags);

	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
	ptep = pte_offset_kernel(pmdp, va);

	if (!pte_none(*ptep) &&
	    pte_val(*ptep) != pte_val(pte))
		pte_ERROR(*ptep);

	set_pte(ptep, pte);

	flush_tlb_kernel_range(va, PAGE_SIZE);
}
Example #12
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create 2nd stage page table mapping - Level 1 */
	pgd = kvm->arch.pgd + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	pmd = pmd_offset(pud, addr);

	/* Create 2nd stage page table mapping - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		kvm_clean_pte(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}
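This is the same stage-2 updater as Example #10, apparently from an earlier revision: the level 0/1 walk that Example #10 factors out into stage2_get_pmd() is still done inline here, and the TLB flush happens after the new PTE is written rather than between clearing the old entry and installing the new one.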
Example #13
pte_t *lookup_address(unsigned long address) 
{ 
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, address);
} 
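One thing the example glosses over: when pmd_large() is true, the returned pte_t * actually points at the PMD itself, so a caller that wants a physical address has to use the large-page offset instead of the 4 KiB one. A sketch under x86-ish assumptions (kva_to_phys is a hypothetical name; pmd_pfn, PMD_MASK, and PAGE_MASK are the usual kernel macros):

static unsigned long kva_to_phys(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return 0;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))	/* large page: offset is everything below PMD_SHIFT */
		return (pmd_pfn(*pmd) << PAGE_SHIFT) | (address & ~PMD_MASK);
	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;
	return (pte_pfn(*pte) << PAGE_SHIFT) | (address & ~PAGE_MASK);
}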
Example #14
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
Example #15
/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	if (tlb_type == hypervisor) {
		result = _PAGE_PRESENT_4V|_PAGE_P_4V;
		if (write)
			result |= _PAGE_WRITE_4V;
	} else {
		result = _PAGE_PRESENT_4U|_PAGE_P_4U;
		if (write)
			result |= _PAGE_WRITE_4U;
	}
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		struct page *page, *head;
		pte_t pte = *ptep;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

		/* The hugepage case is simplified on sparc64 because
		 * we encode the sub-page pfn offsets into the
	 * hugepage PTEs.  We could optimize this in the future to
	 * use page_cache_add_speculative() for the hugepage case.
		 */
		page = pte_page(pte);
		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			return 0;
		}

		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
Example #16
asmlinkage long long sys_my_syscall(int pid, unsigned long long va)
{
	unsigned long long pageFN;
	unsigned long long pa;

	pgd_t *pgd;
	pud_t *pud = NULL;
	pmd_t *pmd = NULL;
	pte_t *pte = NULL;

	struct mm_struct *mm = NULL;

	int found = 0;

	struct task_struct *task;
	for_each_process(task)
	{
		if(task->pid == pid)
			mm = task->mm;
	}
	if(!mm)
		return -ESRCH;	/* no such pid, or a kernel thread with no mm */
	pgd  = pgd_offset(mm,va);
	if(!pgd_none(*pgd) && !pgd_bad(*pgd))
	{
		pud = pud_offset(pgd,va);
		if(!pud_none(*pud) && !pud_bad(*pud))
		{
			pmd = pmd_offset(pud,va);
			if(!pmd_none(*pmd) && !pmd_bad(*pmd))
			{
				pte = pte_offset_kernel(pmd,va);
				if(!pte_none(*pte))
				{
					pageFN = pte_pfn(*pte);
					pa = ((pageFN<<12)|(va&0x00000FFF));
					found = 1;
					return pa;
				}
			}
		}
	}
	if(pte && pte_none(*pte))
	{
		/* Layout-specific: upper half of the raw entry as the swap ID. */
		unsigned long long swapID = (pte_val(*pte) >> 32);
		found = 1;
		return swapID;
	}

	return -1;	/* not mapped and no entry to report */
}
Example #17
static void __make_page_writable(void *va)
{
	pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
	unsigned long addr = (unsigned long) va;

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	ptep = pte_offset_kernel(pmd, addr);

	pte.pte = ptep->pte | _PAGE_RW;
	if (HYPERVISOR_update_va_mapping(addr, pte, 0))
		xen_l1_entry_update(ptep, pte); /* fallback */

	if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
		__make_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT));
}
Example #18
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	if (pgd_none(*pgdp)) {
		pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
						region_start, region_end);
		pgd_populate(&init_mm, pgdp, pudp);
	}
	pudp = pud_offset(pgdp, ea);
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	if (pud_none(*pudp)) {
		pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid,
						region_start, region_end);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	if (!pmd_present(*pmdp)) {
		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
						region_start, region_end);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}
Example #19
static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t start_addr = addr;
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			kvm_set_pte(pte, __pte(0));
			put_page(virt_to_page(pte));
			kvm_tlb_flush_vmid_ipa(kvm, addr);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (kvm_pte_table_empty(kvm, start_pte))
		clear_pmd_entry(kvm, pmd, start_addr);
}
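When the loop leaves the table empty, the final call hands off to clear_pmd_entry() from Example #2, which unhooks the PTE table from its PMD, flushes the TLB for that IPA, and frees the now-empty table page.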
Example #20
static void machine_kexec_page_table_set_one(
	pgd_t *pgd, pmd_t *pmd, pte_t *pte,
	unsigned long vaddr, unsigned long paddr)
{
	pud_t *pud;

	pgd += pgd_index(vaddr);
#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT))
		set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT));
#endif
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	if (!(pmd_val(*pmd) & _PAGE_PRESENT))
		set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
}
Example #21
/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 *
 * If we find a migrating PTE while we're in an NMI context, and we're
 * at a PC that has a registered exception handler, we don't wait,
 * since this thread may (e.g.) have been interrupted while migrating
 * its own stack, which would then cause us to self-deadlock.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address, unsigned long pc,
				int is_kernel_mode, int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		if (in_nmi() && search_exception_tables(pc))
			return 0;
		wait_for_migration(pte);
		return 1;
	}

	if (!is_kernel_mode || !pte_present(pteval))
		return 0;
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}
Example #22
// Translate a guest virtual address (GVA) to a guest physical address (GPA) in the VM.
u64 trans_addr(void *mem, u64 addr, u64 cr3)
{
    u64 pgd_gva;
    u64 pud_gva;
    u64 pmd_gva;
    u64 pte_gva;

    pgd_t *pgd_hva;
    pud_t *pud_hva;
    pmd_t *pmd_hva;
    pte_t *pte_hva;

    // Page Global Directory
    pgd_gva = pgd_offset(cr3, addr);
    //printf("pgd_gva: 0x%016lx\n", pgd_gva);
    pgd_hva = (pgd_t *)(mem+virt_to_phys(pgd_gva));
    //printf("pgd_hva: 0x%016lx\n", (unsigned long)pgd_hva);

    // Page Upper Directory
    pud_gva = pud_offset(pgd_hva, addr);
    //printf("pud_gva: 0x%016lx\n", pud_gva);
    pud_hva = (pud_t *)(mem+virt_to_phys(pud_gva));
    //printf("pud_hva: 0x%016lx\n", (unsigned long)pud_hva->pud);

    // Page Middle Directory
    pmd_gva = pmd_offset(pud_hva, addr);
    //printf("pmd_gva: 0x%016lx\n", pmd_gva);
    pmd_hva = (pmd_t *)(mem+virt_to_phys(pmd_gva));
    //printf("pmd_hva: 0x%016lx\n", (unsigned long)pmd_hva->pmd);

    // Page Table Entry
    pte_gva = pte_offset_kernel(pmd_hva, addr);
    //printf("pte_gva: 0x%016lx\n", pte_gva);
    pte_hva = (pte_t *)(mem+virt_to_phys(pte_gva));
    //printf("pte_hva: 0x%016lx\n", (unsigned long)pte_hva->pte);

    u64 page_mask = page_level_mask(1);
    u64 phys_addr = pte_pfn(*pte_hva) << PAGE_SHIFT;
    u64 offset    = addr & ~page_mask;

    //printf("Physical Address: 0x%016lx\n", (unsigned long)(phys_addr|offset));

    return (unsigned long)(phys_addr|offset);
}
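A hedged usage sketch (all names hypothetical): mem would be the guest's RAM as mmap()ed into the inspecting process, and the address and cr3 would come from the vCPU's saved registers:

    u64 rsp_gpa = trans_addr(mem, vcpu_regs.rsp, vcpu_sregs.cr3);  // guest stack GVA -> GPA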
Example #23
/*
 * free page(s) as defined by the above mapping.
 */
void consistent_free(size_t size, void *vaddr)
{
	struct page *page;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);

#ifndef CONFIG_MMU
	/* Clear SHADOW_MASK bit in address, and free as per usual */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
	page = virt_to_page(vaddr);

	do {
		__free_reserved_page(page);
		page++;
	} while (size -= PAGE_SIZE);
#else
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
						(unsigned int)vaddr),
					(unsigned int)vaddr),
				(unsigned int)vaddr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				__free_reserved_page(page);
			}
		}
		vaddr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	/* flush tlb */
	flush_tlb_all();
#endif
}
Example #24
/*
 * map_kernel_page is currently only called by __ioremap.  It adds an
 * entry to the ioremap page table and an entry to the HPT, possibly
 * bolting it.
 */
int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
#ifndef __PAGETABLE_PUD_FOLDED
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* !__PAGETABLE_PUD_FOLDED */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));

	smp_wmb();
	return 0;
}
Example #25
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	if (tlb_type == hypervisor) {
		result = _PAGE_PRESENT_4V|_PAGE_P_4V;
		if (write)
			result |= _PAGE_WRITE_4V;
	} else {
		result = _PAGE_PRESENT_4U|_PAGE_P_4U;
		if (write)
			result |= _PAGE_WRITE_4U;
	}
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		struct page *page, *head;
		pte_t pte = *ptep;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

		page = pte_page(pte);
		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			return 0;
		}
		if (head != page)
			get_huge_page_tail(page);

		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
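This is nearly the same function as Example #15; the visible difference is the get_huge_page_tail() call, which takes the extra reference needed when the grabbed page is a tail page of a compound (huge) page.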
Example #26
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

		if (after_bootmem) {
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
			if (!page_table)
				page_table =
				(pte_t *)alloc_bootmem_pages(PAGE_SIZE);
		} else
			page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}
Example #27
/*
 * __iounmap unmaps nearly everything, so be careful.
 * It no longer frees pointer/page tables, but that wasn't
 * used anyway and might be added back later.
 */
void __iounmap(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = 0;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			} else if (pmd_type == 0)
				continue;
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
Example #28
void __init pagetable_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd_base;
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
#endif

	/* Initialize the entire pgd.  */
	pgd_init((unsigned long)swapper_pg_dir);
	pgd_init((unsigned long)swapper_pg_dir
		 + sizeof(pgd_t) * USER_PTRS_PER_PGD);

	pgd_base = swapper_pg_dir;

	/*
	 * Fixed mappings:
	 */
#ifdef CONFIG_BCM53000_HIGHMEM
	vaddr = __fix_to_virt(VALIAS_IDX(__end_of_fixed_addresses - 1)) & PMD_MASK;
#else
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
#endif
	fixrange_init(vaddr, 0, pgd_base);

#ifdef CONFIG_HIGHMEM
	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + __pgd_offset(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
#endif
}
Example #29
pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_addr_invalid(addr))
		return NULL;

	pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_huge_page(*pmd))
		return (pte_t *)pmd;
	if (!pmd_present(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}
Example #30
static inline unsigned long uvirt_to_kva(pgd_t *pgd, unsigned long adr)
{
	unsigned long ret = 0UL;
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (!pgd_none(*pgd)) {
		pmd = pmd_offset(pgd, adr);
		if (!pmd_none(*pmd)) {
			ptep = pte_offset_kernel(pmd, adr);
			pte = *ptep;
			if (pte_present(pte)) {
				ret = (unsigned long) page_address(pte_page(pte));
				ret |= (adr & (PAGE_SIZE - 1));
			}
		}
	}
	/* printk(KERN_INFO "uv2kva(%lx-->%lx)\n", adr, ret); */
	return ret;
}
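A hedged usage sketch for the helper above (names hypothetical): the pgd argument is the entry already selected for the address, so the caller performs the pgd_offset() step itself, ideally with mmap_sem held so the tables cannot change underneath it:

	unsigned long kva;

	down_read(&current->mm->mmap_sem);
	kva = uvirt_to_kva(pgd_offset(current->mm, uaddr), uaddr);
	up_read(&current->mm->mmap_sem);
	if (!kva)
		return -EFAULT;	/* not resident; a real driver would fault the page in first */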