Example #1
File: mmu.c Project: 1314cc/linux
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot,
				  phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		flush_tlb_all();
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}
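The population loop above uses a common kernel idiom: the comma operator in the do/while condition advances the PTE pointer and the virtual address together, so the body only ever writes one entry. A minimal userspace sketch of the same pattern (the array and the local PAGE_SIZE are stand-ins, not the kernel objects):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* local stand-in for the kernel macro */

int main(void)
{
	unsigned long table[8];	/* stands in for one page of PTEs */
	unsigned long *pte = table;
	unsigned long addr = 0, end = 8 * PAGE_SIZE;
	unsigned long pfn = 100;

	do {
		*pte = pfn;	/* stands in for set_pte(pte, pfn_pte(pfn, prot)) */
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	printf("last pfn written: %lu\n", pfn - 1);	/* prints 107 */
	return 0;
}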
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr = (unsigned long)page_address(page);

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return false;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return false;
	if (pud_sect(*pud))
		return true;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;
	if (pmd_sect(*pmd))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_valid(*pte);
}
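A hedged sketch of how kernel_page_present() is meant to be consumed, per the comment above: a hibernation-style pass must skip linear-map pages that CONFIG_DEBUG_PAGEALLOC left unmapped. The helper below is hypothetical, not a kernel API:

/* Hypothetical caller: copy a page only if its linear-map alias is valid. */
static int copy_if_present(struct page *page, void *dst)
{
	if (!kernel_page_present(page))
		return -EFAULT;		/* unmapped by DEBUG_PAGEALLOC */

	memcpy(dst, page_address(page), PAGE_SIZE);
	return 0;
}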
Example #3
static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
			get_page(virt_to_page(pmd));
			kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}
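The loop above leans on pmd_addr_end() to clamp each step to the next PMD boundary or to end, whichever comes first, so create_hyp_pte_mappings() never crosses a PMD. A standalone sketch of that clamping (the 2 MiB PMD_SIZE assumes 4K pages, and the helper name is an assumption):

#include <stdio.h>

#define PMD_SIZE (1UL << 21)	/* 2 MiB, assuming 4K pages */
#define PMD_MASK (~(PMD_SIZE - 1))

/* Same shape as the kernel's pmd_addr_end(): next boundary, clamped to end. */
static unsigned long pmd_addr_end_sketch(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

	/* The -1 on both sides keeps the comparison safe if end wraps to 0. */
	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	unsigned long addr = 0x1ff000, end = 0x600000, next;

	do {
		next = pmd_addr_end_sketch(addr, end);
		printf("chunk [%#lx, %#lx)\n", addr, next);
	} while (addr = next, addr != end);
	return 0;
}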
Example #4
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}
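The difference from the kernel_page_present() in Example #1 is that each table entry is loaded exactly once via READ_ONCE() into a local, so the none/sect pair always tests the same snapshot even if another CPU updates the entry concurrently. A simplified sketch of the core of READ_ONCE() for word-sized scalars (the real macro adds type checking and handling for other sizes):

/*
 * Simplified sketch: the volatile access forces exactly one load, so the
 * compiler cannot re-read the entry between the none/sect checks.
 */
#define READ_ONCE_SKETCH(x) (*(const volatile __typeof__(x) *)&(x))

/* Usage mirroring the function above:
 *	pud_t pud = READ_ONCE_SKETCH(*pudp);
 *	if (pud_none(pud)) ...
 */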
Example #5
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
		flush_tlb_all();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
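This is an older revision of the Example #1 function: the allocator hands back a kernel virtual address, the table is filled through pte_offset_kernel(), and __pa() turns the pointer back into the physical address stored in the PMD, where the newer code instead maps the table's physical page through a fixmap slot. On a direct-mapped (linear) region, __pa() reduces to offset arithmetic, roughly as sketched below; the constants are architecture-specific and assumed here:

/*
 * Sketch only: PAGE_OFFSET is where the kernel's linear map begins and
 * PHYS_OFFSET is where RAM begins; both are placeholders for the real,
 * architecture-defined values.
 */
#define __pa_sketch(va) ((unsigned long)(va) - PAGE_OFFSET + PHYS_OFFSET)
#define __va_sketch(pa) ((void *)((unsigned long)(pa) - PHYS_OFFSET + PAGE_OFFSET))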
Example #6
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
#ifdef CONFIG_ARM64
		if (pmd_none(*pmd) || pmd_sect(*pmd)) {
#else
		if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd)) {
#endif
			note_page(st, addr, 3, pmd_val(*pmd));
		} else {
			walk_pte(st, pmd, addr);
		}
#ifdef CONFIG_ARM
		if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1]))
			note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1]));
#endif
	}
}

static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
	pud_t *pud = pud_offset(pgd, 0);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		addr = start + i * PUD_SIZE;
#if defined(CONFIG_ARM64) && !defined(CONFIG_ANDROID)
		if (pud_none(*pud) || pud_sect(*pud)) {
			note_page(st, addr, 2, pud_val(*pud));
		} else {
			walk_pmd(st, pud, addr);
		}
#else
		if (!pud_none(*pud)) {
			walk_pmd(st, pud, addr);
		} else {
			note_page(st, addr, 2, pud_val(*pud));
		}
#endif
	}
}
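A hypothetical top-level walker in the same style, driving walk_pud() across the whole PGD; pgd_offset(), PTRS_PER_PGD and PGDIR_SIZE are the usual kernel symbols, but this particular function is a sketch, not part of the example above:

/* Hypothetical top-level walker: note_page() at level 1 records PGD holes. */
static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
		     unsigned long start)
{
	pgd_t *pgd = pgd_offset(mm, 0UL);
	unsigned long addr;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
		addr = start + i * PGDIR_SIZE;
		if (pgd_none(*pgd))
			note_page(st, addr, 1, pgd_val(*pgd));
		else
			walk_pud(st, pgd, addr);
	}
}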