static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
Example #2
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 4MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}
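
The comment above explains why section mappings must be torn down eagerly and why the size is masked back to section granularity. For orientation, here is a hedged sketch of how an iounmap-style caller might invoke unmap_area_sections() before returning the area to vmalloc; the VM_SECTION_MAPPING flag and the my_iounmap_sections() name are assumptions for illustration, not taken from the kernel.

#define VM_SECTION_MAPPING	0x80000000	/* illustrative flag, not the in-tree one */

static void my_iounmap_sections(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long) io_addr);
	struct vm_struct *vm = find_vm_area(addr);

	/* tear down the section mappings first, then release the area */
	if (vm && (vm->flags & VM_SECTION_MAPPING))
		unmap_area_sections((unsigned long) vm->addr, vm->size);

	vunmap(addr);
}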
Example #3
/*
 * Place a pointer to an L2 page table in a middle page
 * directory entry.
 */
static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
{
	phys_addr_t pa = __pa(page_table);
	unsigned long l2_ptfn = pa >> HV_LOG2_PAGE_TABLE_ALIGN;
	pte_t pteval = hv_pte_set_ptfn(__pgprot(_PAGE_TABLE), l2_ptfn);
	BUG_ON((pa & (HV_PAGE_TABLE_ALIGN-1)) != 0);
	pteval = pte_set_home(pteval, initial_heap_home());
	*(pte_t *)pmd = pteval;
	if (page_table != (pte_t *)pmd_page_vaddr(*pmd))
		BUG();
}
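
A minimal, hedged sketch of an early-boot caller: allocate a zeroed L2 table and hook it under the PMD covering a given virtual address. The my_install_l2_table() helper and the bootmem allocation size are assumptions for illustration; only assign_pte() itself comes from the example above.

static void __init my_install_l2_table(unsigned long va)
{
	pgd_t *pgd = pgd_offset_k(va);
	pmd_t *pmd = pmd_offset(pud_offset(pgd, va), va);
	/* assumed allocation: one L2 table's worth of PTE slots, page aligned */
	pte_t *pt = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));

	assign_pte(pmd, pt);	/* pmd_page_vaddr(*pmd) now yields 'pt' */
}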
Example #4
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	/* There's an interaction between the skas0 stub pages, stack
	 * randomization, and the BUG at the end of exit_mmap.  exit_mmap
	 * checks that the number of page tables freed is the same as had
	 * been allocated.  If the stack is on the last page table page,
	 * then the stack pte page will be freed, and if not, it won't.  To
	 * avoid having to know where the stack is, or if the process mapped
	 * something at the top of its address space for some other reason,
	 * we set TASK_SIZE to end at the start of the last page table.
	 * This keeps exit_mmap off the last page, but introduces a leak
	 * of that page.  So, we hang onto it here and free it in
	 * destroy_context_skas.
	 */

	mm->context.skas.last_page_table = pmd_page_vaddr(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
	mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif

	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkexec(*pte);
	*pte = pte_wrprotect(*pte);
	return(0);

 out_pte:
	/* pte_alloc_map() failed: free the pmd, then fall through to the pud */
	pmd_free(pmd);
 out_pmd:
	/* pmd_alloc() failed (or fell through): free the pud */
	pud_free(pud);
 out:
	return(-ENOMEM);
}
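
The comment notes that the deliberately leaked page-table page is released later in destroy_context_skas(). Below is a rough sketch of that matching teardown, under the assumption that last_page_table and last_pmd hold kernel virtual addresses as assigned above, and using the same single-argument free helpers as the error path; it is illustrative, not the in-tree function.

static void my_destroy_stub_tables(struct mm_struct *mm)
{
	/* free the PTE page kept alive past exit_mmap() */
	pte_free_kernel((pte_t *) mm->context.skas.last_page_table);
#ifdef CONFIG_3_LEVEL_PGTABLES
	/* and the PMD page above it, when three levels are configured */
	pmd_free((pmd_t *) mm->context.skas.last_pmd);
#endif
}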
Example #5
static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
							unsigned long P)
{
	int i;
	pte_t *start;

	start = (pte_t *) pmd_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pgprot_t prot = pte_pgprot(*start);

		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
		note_page(m, st, prot, 4);
		start++;
	}
}
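
walk_pte_level() is the leaf of a page-table dumper; the level above would typically use pud_page_vaddr() the same way and descend only through present, non-large PMDs. A hedged sketch of such a companion follows: my_walk_pmd_level() and MY_PMD_LEVEL_MULT are illustrative names, and the flag masking follows common x86 conventions rather than anything shown in the original.

#define MY_PMD_LEVEL_MULT	(PTRS_PER_PTE * PTE_LEVEL_MULT)

static void my_walk_pmd_level(struct seq_file *m, struct pg_state *st,
			      pud_t addr, unsigned long P)
{
	int i;
	pmd_t *start = (pmd_t *) pud_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		st->current_address = normalize_addr(P + i * MY_PMD_LEVEL_MULT);
		if (pmd_none(*start) || pmd_large(*start) ||
		    !pmd_present(*start))
			/* record this whole PMD-sized region as one entry */
			note_page(m, st,
				  __pgprot(pmd_val(*start) & ~PTE_PFN_MASK), 3);
		else
			/* descend into the PTE table below this PMD */
			walk_pte_level(m, st, *start,
				       P + i * MY_PMD_LEVEL_MULT);
		start++;
	}
}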
Example #6
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
	pte_t *pte;
#endif

	/* Get the top-level page table entry. */
	pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

	/* We don't have four levels. */
	pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
	if (!pud_present(*pud))
		return NULL;

	/* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
	if (pud_huge(*pud))
		return (pte_t *)pud;

	pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
			       pmd_index(addr), 1);
	if (!pmd_present(*pmd))
		return NULL;
#else
	pmd = pmd_offset(pud, addr);
#endif

	/* Check for an L1 huge PTE. */
	if (pmd_huge(*pmd))
		return (pte_t *)pmd;

#ifdef CONFIG_HUGETLB_SUPER_PAGES
	/* Check for an L2 huge PTE. */
	pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
	if (!pte_present(*pte))
		return NULL;
	if (pte_super(*pte))
		return pte;
#endif

	return NULL;
}
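
A short, hedged usage sketch: because huge_pte_offset() returns a pointer into whichever level actually holds the huge mapping, a caller can treat the result as an ordinary PTE. my_huge_pfn() is an illustrative helper, not part of the original file.

static unsigned long my_huge_pfn(struct mm_struct *mm, unsigned long addr)
{
	pte_t *ptep = huge_pte_offset(mm, addr);

	if (!ptep || !pte_present(*ptep))
		return 0;		/* no huge mapping at this address */
	return pte_pfn(*ptep);		/* pfn of the huge page */
}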
Example #7
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
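
The kvm_seq handshake above only works if other address spaces eventually recopy the updated kernel entries. As a hedged sketch of that catch-up step, modeled on what an ARM-style __check_kvm_seq() does (my_sync_kernel_mappings() and the exact copy range are assumptions for illustration), the idea is to recopy the vmalloc/ioremap PGD slots from init_mm until the sequence numbers agree:

static void my_sync_kernel_mappings(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		/* copy the kernel (vmalloc/ioremap) PGD entries from init_mm */
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}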
Example #8
int exit_mm(struct mm_struct *mm)
{
	pmd_t *pmd;
	pgd_t *pgd;
	uint32_t pgdno, pmdno;
	physaddr_t pa;

	struct page *page;

	/* nothing to tear down for a NULL or never-populated mm */
	if (!mm || !mm->mm_pgd)
		return 0;

	/* only drop the page tables when the last reference goes away */
	if (!atomic_dec_and_test(&mm->mm_count))
		return 0;

	delete_all_vma(mm);

	for (pgdno = 0; pgdno < pgd_index(KERNEL_BASE_ADDR); pgdno++) {
		pgd = mm->mm_pgd + pgdno;
		if (!pgd_present(*pgd) || pgd_none(*pgd))
			continue;
		pmd_t *tmp = (pmd_t *) pgd_page_vaddr(*pgd);

		for (pmdno = 0; pmdno < PTRS_PER_PMD; pmdno++) {
			pmd = tmp + pmdno;
			if (!pmd_present(*pmd) || pmd_none(*pmd))
				continue;
			struct page *p = virt2page(pmd_page_vaddr(*pmd));
			page_decref(p);
			pmd_set(pmd, 0, 0);
		}
		struct page *p = virt2page(pgd_page_vaddr(*pgd));
		page_decref(p);
		pgd_set(pgd, 0, 0);
	}

	page = virt2page((viraddr_t)mm->mm_pgd);
	page_free(page);
	kfree(mm);

	return 0;
}
Example #9
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			pmd_clear(pmdp);

			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}
Example #10
/*
 * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags
 * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}
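
For the non-huge case the comment relies on pmd_page_vaddr() handing back the 4K-aligned virtual address of the PTE fragment stored in the PMD. A rough sketch of what such a helper does is shown below; PMD_MASKED_BITS stands for the low flag bits kept in the entry, and the exact mask name should be treated as an assumption here.

static inline unsigned long my_pmd_page_vaddr(pmd_t pmd)
{
	/* strip the flag bits, then translate back to a kernel virtual address */
	return (unsigned long) __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
}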
Example #11
pte_t *my_pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + my_pte_index(address);
}
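
Putting the same idiom into a full walk: a hedged sketch that resolves a kernel virtual address down to its PTE, using the pud_offset(pgd, addr)-style helpers seen throughout these examples. my_pte_lookup_kernel() is an illustrative name, not code from any of the files above.

static pte_t *my_pte_lookup_kernel(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	/* same pattern as my_pte_offset_kernel() above */
	return (pte_t *) pmd_page_vaddr(*pmd) + pte_index(address);
}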