Example #1
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_base + pgd_index(restore_jump_address);

	pmd = resume_one_md_table_init(pgd);
	if (!pmd)
		return -ENOMEM;

	if (boot_cpu_has(X86_FEATURE_PSE)) {
		set_pmd(pmd + pmd_index(restore_jump_address),
			__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
	} else {
		pte = resume_one_page_table_init(pmd);
		if (!pte)
			return -ENOMEM;
		set_pte(pte + pte_index(restore_jump_address),
			__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
	}

	return 0;
}
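All of the examples on this page revolve around the page-table index helpers. As a reminder (a sketch of the conventional x86 Linux definitions, not part of the example above), they reduce to shift-and-mask operations:

#define pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pmd_index(addr)	(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

So pmd + pmd_index(restore_jump_address) above selects the single PMD entry covering the jump address (a large-page mapping in the PSE case).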
Example #2
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			pr_warning("Failed to allocate identity pmd.\n");
			return;
		}
		/*
		 * Copy the original PMD to ensure that the PMD entries for
		 * the kernel image are preserved.
		 */
		if (!pud_none(*pud))
			memcpy(pmd, pmd_offset(pud, 0),
			       PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
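For context, the PUD-level caller that drives idmap_add_pmd() in the ARM identity-map code looks roughly like the sketch below (modeled on arch/arm/mm/idmap.c; treat it as an approximation, not a verbatim copy):

static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		idmap_add_pmd(pud, addr, next, prot);
	} while (pud++, addr = next, addr != end);
}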
Example #3
/*
 * NOTE: The page tables are allocated contiguously in physical memory, so we
 * can cache the location of the first one and move around without checking
 * the pgd every time.
 */
static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		if (pgd_none(*pgd)) 
			one_md_table_init(pgd);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
			if (pmd_none(*pmd)) 
				one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}
Example #4
/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lg_cpu *cpu)
{
	/* We get the kernel address: above this is all kernel memory. */
	if (get_user(cpu->lg->kernel_address,
		&cpu->lg->lguest_data->kernel_address)
		/*
		 * We tell the Guest that it can't use the top 2 or 4 MB
		 * of virtual addresses used by the Switcher.
		 */
		|| put_user(RESERVE_MEM * 1024 * 1024,
			    &cpu->lg->lguest_data->reserve_mem)) {
		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
		return;
	}

	/*
	 * In flush_user_mappings() we loop from 0 to
	 * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
	 * Switcher mappings, so check that now.
	 */
#ifdef CONFIG_X86_PAE
	if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
		pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
#else
	if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
#endif
		kill_guest(cpu, "bad kernel address %#lx",
				 cpu->lg->kernel_address);
}
Example #5
static void __meminit early_make_page_readonly(void *va, unsigned int feature)
{
	unsigned long addr, _va = (unsigned long)va;
	pte_t pte, *ptep;
	unsigned long *page = (unsigned long *) init_level4_pgt;

	BUG_ON(after_bootmem);

	if (xen_feature(feature))
		return;

	addr = (unsigned long) page[pgd_index(_va)];
	addr_to_page(addr, page);

	addr = page[pud_index(_va)];
	addr_to_page(addr, page);

	addr = page[pmd_index(_va)];
	addr_to_page(addr, page);

	ptep = (pte_t *) &page[pte_index(_va)];

	pte.pte = ptep->pte & ~_PAGE_RW;
	if (HYPERVISOR_update_va_mapping(_va, pte, 0))
		BUG();
}
Example #6
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */
	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | _KERNPG_TABLE));
	set_pgd(pgd + pgd_index(restore_jump_address),
		__pgd(__pa(pud) | _KERNPG_TABLE));

	return 0;
}
Example #7
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;
	pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
	pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
	pgprot_val(pgtable_prot)  &= __default_kernel_pte_mask;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

	if (pgtable_l5_enabled()) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
	if (p4d) {
		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

		set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	}

	return 0;
}
Example #8
unsigned long virtaddr_to_physaddr(struct mm_struct *mm, unsigned long vaddr)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    unsigned long paddr = 0;

    pgd = pgd_offset(mm, vaddr);
    printk("pgd_val = 0x%lx\n", pgd_val(*pgd));
    printk("pgd_index = %lu\n", pgd_index(vaddr));
    if (pgd_none(*pgd)) {
        printk("not mapped in pgd\n");
        return INVALID_ADDR;
    }

    pud = pud_offset(pgd, vaddr);
    printk("pud_val = 0x%lx\n", pud_val(*pud));
    printk("pud_index = %lu\n", pud_index(vaddr));
    if (pud_none(*pud)) {
        printk("not mapped in pud\n");
        return INVALID_ADDR;
    }

    pmd = pmd_offset(pud, vaddr);
    printk("pmd_val = 0x%lx\n", pmd_val(*pmd));
    printk("pmd_index = %lx\n", pmd_index(vaddr));
    if (pmd_none(*pmd)) {
        printk("not mapped in pmd\n");
        return INVALID_ADDR;
    }
    /*
     * If pmd_large() is true, the pmd maps a 2 MB huge page and is the last
     * level: take the huge-page frame base from the entry and add the offset
     * within the huge page.
     */
    if (pmd_large(*pmd)) {
        paddr = (pmd_pfn(*pmd) << PAGE_SHIFT) | (vaddr & ~PMD_MASK);
        return paddr;
    }
    /*
     * Otherwise walk the fourth-level page table; PAGE_MASK
     * (0xfffffffffffff000) strips the page-offset bits [11:0].
     */
    else {
        pte = pte_offset_kernel(pmd, vaddr);
        printk("pte_val = 0x%lx\n", pte_val(*pte));
        printk("pte_index = %lx\n", pte_index(vaddr));
        if (pte_none(*pte)) {
            printk("not mapped in pte\n");
            return INVALID_ADDR;
        }
        paddr = (pte_val(*pte) & PAGE_MASK) | (vaddr & ~PAGE_MASK);
        printk("paddr = %lx\n", paddr);
        printk("__pa = %lx\n", __pa(vaddr));   /* magic macro in the kernel */
        return paddr;
    }

}
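A hypothetical call site for the walker above, as a minimal sketch: it assumes the function is built into the kernel (or somewhere init_mm is reachable) and that INVALID_ADDR is defined alongside it. It walks a kmalloc()ed, direct-mapped address so the result can be cross-checked against __pa().

static int __init vtop_demo_init(void)
{
	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* kmalloc() memory is covered by the kernel page tables in init_mm. */
	printk("walked paddr = 0x%lx\n",
	       virtaddr_to_physaddr(&init_mm, (unsigned long)buf));
	printk("__pa()       = 0x%lx\n", __pa(buf));

	kfree(buf);
	return 0;
}
module_init(vtop_demo_init);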
Example #9
/*
 * for Xen extraction
 */
unsigned long long
kvtop_xen_x86_64(unsigned long kvaddr)
{
	unsigned long long dirp, entry;

	if (!is_xen_vaddr(kvaddr))
		return NOT_PADDR;

	if (is_xen_text(kvaddr))
		return (unsigned long)kvaddr - XEN_VIRT_START + info->xen_phys_start;

	if (is_direct(kvaddr))
		return (unsigned long)kvaddr - DIRECTMAP_VIRT_START;

	if ((dirp = kvtop_xen_x86_64(SYMBOL(pgd_l4))) == NOT_PADDR)
		return NOT_PADDR;
	dirp += pml4_index(kvaddr) * sizeof(unsigned long long);
	if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
		return NOT_PADDR;

	if (!(entry & _PAGE_PRESENT))
		return NOT_PADDR;

	dirp = entry & ENTRY_MASK;
	dirp += pgd_index(kvaddr) * sizeof(unsigned long long);
	if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
		return NOT_PADDR;

	if (!(entry & _PAGE_PRESENT))
		return NOT_PADDR;

	dirp = entry & ENTRY_MASK;
	dirp += pmd_index(kvaddr) * sizeof(unsigned long long);
	if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
		return NOT_PADDR;

	if (!(entry & _PAGE_PRESENT))
		return NOT_PADDR;

	if (entry & _PAGE_PSE) {
		entry = (entry & ENTRY_MASK) + (kvaddr & ((1UL << PMD_SHIFT) - 1));
		return entry;
	}
	dirp = entry & ENTRY_MASK;
	dirp += pte_index(kvaddr) * sizeof(unsigned long long);
	if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
		return NOT_PADDR;

	if (!(entry & _PAGE_PRESENT)) {
		return NOT_PADDR;
	}

	entry = (entry & ENTRY_MASK) + (kvaddr & ((1UL << PTE_SHIFT) - 1));

	return entry;
}
Example #10
/* Create a new PMD entry */
int __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	unsigned long i;
	pgdval_t pgd, *pgd_p;
	pudval_t pud, *pud_p;
	pmdval_t pmd, *pmd_p;

	/* Invalid address or early pgt is done ?  */
	if (physaddr >= MAXMEM || read_cr3() != __pa(early_level4_pgt))
		return -1;

again:
	pgd_p = &early_level4_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (pgd)
		pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		for (i = 0; i < PTRS_PER_PUD; i++)
			pud_p[i] = 0;
		*pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		for (i = 0; i < PTRS_PER_PMD; i++)
			pmd_p[i] = 0;
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pmd = (physaddr & PMD_MASK) + early_pmd_flags;
	pmd_p[pmd_index(address)] = pmd;

	return 0;
}
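The (entry & PTE_PFN_MASK) + __START_KERNEL_map - phys_base arithmetic above appears twice; a purely illustrative helper (hypothetical, not in the original) makes the conversion explicit:

/*
 * Hypothetical helper, for illustration only: pages reached here live in the
 * kernel image mapping, where virt = phys - phys_base + __START_KERNEL_map,
 * so a physical address taken from a page-table entry can be turned back into
 * a pointer without relying on the still-incomplete direct mapping.
 */
static inline void *early_pgt_ptr(unsigned long entry)
{
	return (void *)((entry & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
}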
Example #11
/*
 * This routine then takes the PGD entry given above, which contains the
 * address of the PMD page.  It then returns a pointer to the PMD entry for the
 * given address.
 */
static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
	unsigned int index = pmd_index(vaddr);
	pmd_t *page;

	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
	page = __va(pgd_pfn(spgd) << PAGE_SHIFT);

	return &page[index];
}
Example #12
void set_pmde(pgd_t *pgde, struct page *pf, viraddr_t address, uint32_t perm)
{
#ifdef DEBUG
	assert(pgde);
	assert(pf);
	assert(page2pfn(pf) || !page2pfn(pf));
#endif	
	pmd_t * pmde = NULL;
	pmde = (pmd_t *)pgd_page_vaddr(*pgde) + pmd_index(address);
	pmd_val(*pmde) = page2phys(pf) | perm;
}
Example #13
/* To avoid virtual aliases later */
__meminit void early_iounmap(void *addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd;
	int i, pmds;

	vaddr = (unsigned long)addr;
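	/* ~PMD_MASK == PMD_SIZE - 1, so this rounds the span up to whole PMD entries. */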
	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	pmd = level2_kernel_pgt + pmd_index(vaddr);
	for (i = 0; i < pmds; i++)
		pmd_clear(pmd + i);
	__flush_tlb();
}
Example #14
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (pmd_present(*pmd))
			continue;

		set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
	}
}
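For completeness, the PUD-level caller in the same identity-mapping code looks roughly like the sketch below (modeled on arch/x86/mm/ident_map.c of the same vintage; the alloc_pgt_page/context members of struct x86_mapping_info are assumed from that version):

static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}

		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;

		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}

	return 0;
}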
Example #15
File: x86.c Project: karibou/progit
unsigned long long
vtop_x86_PAE(unsigned long vaddr)
{
	unsigned long long page_dir, pgd_pte, pmd_paddr, pmd_pte;
	unsigned long long pte_paddr, pte;

	if (SYMBOL(swapper_pg_dir) == NOT_FOUND_SYMBOL) {
		ERRMSG("Can't get the symbol of swapper_pg_dir.\n");
		return NOT_PADDR;
	}

	page_dir  = SYMBOL(swapper_pg_dir);
	page_dir += pgd_index_PAE(vaddr) * sizeof(unsigned long long);
	if (!readmem(VADDR, page_dir, &pgd_pte, sizeof(pgd_pte))) {
		ERRMSG("Can't get pgd_pte (page_dir:%llx).\n", page_dir);
		return NOT_PADDR;
	}
	if (!(pgd_pte & _PAGE_PRESENT))
		return NOT_PADDR;

	if (info->vaddr_for_vtop == vaddr)
		MSG("  PGD : %16llx => %16llx\n", page_dir, pgd_pte);

	pmd_paddr  = pgd_pte & ENTRY_MASK;
	pmd_paddr += pmd_index(vaddr) * sizeof(unsigned long long);
	if (!readmem(PADDR, pmd_paddr, &pmd_pte, sizeof(pmd_pte))) {
		ERRMSG("Can't get pmd_pte (pmd_paddr:%llx).\n", pmd_paddr);
		return NOT_PADDR;
	}
	if (!(pmd_pte & _PAGE_PRESENT))
		return NOT_PADDR;

	if (info->vaddr_for_vtop == vaddr)
		MSG("  PMD : %16llx => %16llx\n", pmd_paddr, pmd_pte);

	if (pmd_pte & _PAGE_PSE)
		return (pmd_pte & ENTRY_MASK) + (vaddr & ((1UL << PMD_SHIFT) - 1));

	pte_paddr  = pmd_pte & ENTRY_MASK;
	pte_paddr += pte_index(vaddr) * sizeof(unsigned long long);
	if (!readmem(PADDR, pte_paddr, &pte, sizeof(pte)))
		return NOT_PADDR;

	if (!(pte & _PAGE_PRESENT))
		return NOT_PADDR;

	if (info->vaddr_for_vtop == vaddr)
		MSG("  PTE : %16llx => %16llx\n", pte_paddr, pte);

	return (pte & ENTRY_MASK) + (vaddr & ((1UL << PTE_SHIFT) - 1));
}
Example #16
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long entry;
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}

		if (pmd_val(*pmd))
			continue;

		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}
Example #17
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;

	/*
	 * match the virtual addresses, permission and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}
Example #18
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
	pte_t *pte;
#endif

	/* Get the top-level page table entry. */
	pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

	/* We don't have four levels. */
	pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
	if (!pud_present(*pud))
		return NULL;

	/* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
	if (pud_huge(*pud))
		return (pte_t *)pud;

	pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
			       pmd_index(addr), 1);
	if (!pmd_present(*pmd))
		return NULL;
#else
	pmd = pmd_offset(pud, addr);
#endif

	/* Check for an L1 huge PTE. */
	if (pmd_huge(*pmd))
		return (pte_t *)pmd;

#ifdef CONFIG_HUGETLB_SUPER_PAGES
	/* Check for an L2 huge PTE. */
	pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
	if (!pte_present(*pte))
		return NULL;
	if (pte_super(*pte))
		return pte;
#endif

	return NULL;
}
Example #19
/*
 * This routine then takes the PGD entry given above, which contains the
 * address of the PMD page.  It then returns a pointer to the PMD entry for the
 * given address.
 */
static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
	unsigned int index = pmd_index(vaddr);
	pmd_t *page;

	/* We kill any Guest trying to touch the Switcher addresses. */
	if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
					index >= SWITCHER_PMD_INDEX) {
		kill_guest(cpu, "attempt to access switcher pages");
		index = 0;
	}

	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
	page = __va(pgd_pfn(spgd) << PAGE_SHIFT);

	return &page[index];
}
Example #20
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(NULL, addr);
		if (!pmd) {
			pr_warning("Failed to allocate identity pmd.\n");
			return;
		}
		pud_populate(NULL, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
Example #21
int MapTable::extend(RegFile& regf, const MapInfo& info)
{
    void* start_ = info._addr;
    void* end_ = start_ + info._len;

    while (start_ < end_) {
        uint g_ = pgd_index(_end);
        uint m_ = pmd_index(_end);
        if (!m_) {
            void** addr_ = pmd_alloc();
            if (!addr_) return -1;

            _pgd[g_] = addr_;
        }

        _pgd[g_][m_] = start_;
        start_ += 4096;
        _end += 4096;
    }

    regf.stop = _end;
    return 0;
}
Example #22
/* Follow the PGD to the PMD. */
static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
{
	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
	return gpage + pmd_index(vaddr) * sizeof(pmd_t);
}
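The matching PTE-level helper in the same driver follows the identical pattern one level down (a sketch reconstructed from the lguest PAE code, not guaranteed verbatim):

/* Follow the PMD to the PTE. */
static unsigned long gpte_addr(struct lg_cpu *cpu, pmd_t gpmd, unsigned long vaddr)
{
	unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;

	BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
	return gpage + pte_index(vaddr) * sizeof(pte_t);
}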
Example #23
/*
 * Translate a virtual address to a physical address using 4-level paging.
 */
unsigned long long
vtop4_x86_64(unsigned long vaddr)
{
	unsigned long page_dir, pml4, pgd_paddr, pgd_pte, pmd_paddr, pmd_pte;
	unsigned long pte_paddr, pte;

	if (SYMBOL(init_level4_pgt) == NOT_FOUND_SYMBOL) {
		ERRMSG("Can't get the symbol of init_level4_pgt.\n");
		return NOT_PADDR;
	}

	/*
	 * Get PGD.
	 */
	page_dir  = SYMBOL(init_level4_pgt);
	page_dir += pml4_index(vaddr) * sizeof(unsigned long);
	if (!readmem(VADDR, page_dir, &pml4, sizeof pml4)) {
		ERRMSG("Can't get pml4 (page_dir:%lx).\n", page_dir);
		return NOT_PADDR;
	}
	if (info->vaddr_for_vtop == vaddr)
		MSG("  PGD : %16lx => %16lx\n", page_dir, pml4);

	if (!(pml4 & _PAGE_PRESENT)) {
		ERRMSG("Can't get a valid pml4.\n");
		return NOT_PADDR;
	}

	/*
	 * Get PUD.
	 */
	pgd_paddr  = pml4 & PHYSICAL_PAGE_MASK;
	pgd_paddr += pgd_index(vaddr) * sizeof(unsigned long);
	if (!readmem(PADDR, pgd_paddr, &pgd_pte, sizeof pgd_pte)) {
		ERRMSG("Can't get pgd_pte (pgd_paddr:%lx).\n", pgd_paddr);
		return NOT_PADDR;
	}
	if (info->vaddr_for_vtop == vaddr)
		MSG("  PUD : %16lx => %16lx\n", pgd_paddr, pgd_pte);

	if (!(pgd_pte & _PAGE_PRESENT)) {
		ERRMSG("Can't get a valid pgd_pte.\n");
		return NOT_PADDR;
	}

	/*
	 * Get PMD.
	 */
	pmd_paddr  = pgd_pte & PHYSICAL_PAGE_MASK;
	pmd_paddr += pmd_index(vaddr) * sizeof(unsigned long);
	if (!readmem(PADDR, pmd_paddr, &pmd_pte, sizeof pmd_pte)) {
		ERRMSG("Can't get pmd_pte (pmd_paddr:%lx).\n", pmd_paddr);
		return NOT_PADDR;
	}
	if (info->vaddr_for_vtop == vaddr)
		MSG("  PMD : %16lx => %16lx\n", pmd_paddr, pmd_pte);

	if (!(pmd_pte & _PAGE_PRESENT)) {
		ERRMSG("Can't get a valid pmd_pte.\n");
		return NOT_PADDR;
	}
	if (pmd_pte & _PAGE_PSE)
		return (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK)
			+ (vaddr & ~_2MB_PAGE_MASK);

	/*
	 * Get PTE.
	 */
	pte_paddr  = pmd_pte & PHYSICAL_PAGE_MASK;
	pte_paddr += pte_index(vaddr) * sizeof(unsigned long);
	if (!readmem(PADDR, pte_paddr, &pte, sizeof pte)) {
		ERRMSG("Can't get pte (pte_paddr:%lx).\n", pte_paddr);
		return NOT_PADDR;
	}
	if (info->vaddr_for_vtop == vaddr)
		MSG("  PTE : %16lx => %16lx\n", pte_paddr, pte);

	if (!(pte & _PAGE_PRESENT)) {
		ERRMSG("Can't get a valid pte.\n");
		return NOT_PADDR;
	}
	return (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(vaddr);
}