Example #1
/*
 * On Assabet, we must probe for the Neponset board _before_
 * paging_init() has occurred to actually determine the amount
 * of RAM available.  To do so, we map the appropriate IO section
 * in the page table here in order to access GPIO registers.
 */
static void __init map_sa1100_gpio_regs( void )
{
	unsigned long phys = __PREG(GPLR) & PMD_MASK;
	unsigned long virt = io_p2v(phys);
	int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
	pmd_t pmd;
	pmd_val(pmd) = phys | prot;
	set_pmd(pmd_offset(pgd_offset_k(virt), virt), pmd);
}
Example #2
/*
 * On Assabet, we must probe for the Neponset board _before_
 * paging_init() has occurred to actually determine the amount
 * of RAM available.  To do so, we map the appropriate IO section
 * in the page table here in order to access GPIO registers.
 */
static void __init map_sa1100_gpio_regs( void )
{
	unsigned long phys = __PREG(GPLR) & PMD_MASK;
	unsigned long virt = (unsigned long)io_p2v(phys);
	int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
	*pmd = __pmd(phys | prot);
	flush_pmd_entry(pmd);
}
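The two variants above differ only in how they walk and write the first-level entry; in both cases the point is that, once the section covering the GPIO registers is mapped, those registers become readable through their io_p2v() virtual addresses before paging_init() runs. A minimal caller sketch follows, assuming the usual SA-1100 GPLR definition; probe_neponset_example() and NEPONSET_PRESENT_MASK are illustrative names, not kernel definitions.

/* Hypothetical probe sketch: NEPONSET_PRESENT_MASK is a placeholder bit
 * mask, not a real SA-1100 definition. */
static int __init probe_neponset_example(void)
{
	/* Install the section mapping covering the GPIO register window. */
	map_sa1100_gpio_regs();

	/* GPLR is now accessible via its io_p2v() virtual address. */
	return (GPLR & NEPONSET_PRESENT_MASK) != 0;
}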
Example #3
/* cpd_load: This function loads a Page Directory Entry 'pmd' from the 'current'
 *   process into the CPD. It simply takes 'pmd' and tags it with the domain
 *   allocated to the region it is associated with. The result is then loaded
 *   into the required CPD entry. The function updates the cpd_count of the
 *   domain and does a coherence check if the CPD entry is being replaced, 
 *   i.e., it was previously allocated to another region.
 *
 * Notes:
 *   - mv_addr is the ARM PID relocated Modified Virtual Address (MVA) seen by
 *     the FASS part of the kernel. Since the CPD is the only pg_dir walked by
 *     the hardware it is the only pg_dir that needs to do ARM PID relocation.
 */
void
cpd_load(pmd_t pmd, unsigned long mv_addr)
{
	struct domain_struct* domain_p =
		cpd_get_domain(current->mm, mva_to_va(mv_addr, current->mm));
	pmd_t* cpd_p = pmd_offset(pgd_offset_k(mv_addr), mv_addr);
	unsigned int index = CPD_P_TO_INDEX(cpd_p);
	int old_domain = pmd_domain(*cpd_p);

#if 0
	printk("** cpd_load: i %d old %d new %d , v_addr %lx, mv_addr %lx**\n", index, old_domain,
	       domain_p->number, mva_to_va(mv_addr, current->mm), mv_addr);
#endif
	if (old_domain) { /* CPD entry being replaced, TLB/Cache coherence needed */
		if (old_domain == domain_p->number)
		{
			printk("Arrgh.  cpd_load called from %p\n", __builtin_return_address(0));
			printk("        pmd_count = %d, pmd_val(pmd) = 0x%lx\n",
			       domain_p->cpd_count,
			       pmd_val(pmd));
		}
		fassert(old_domain != domain_p->number);
    
		/* update cpd_stats[] */
		cpd_stats[index].collisions++;
		cpd_stats_del(old_domain, cpd_stats + index);
    
		/* update domains[] */
		domains[old_domain].cpd_count--;    

		/* The cache clean is not required for ARM9, see cpd.h Notes */
		if (!cpd_is_domain_cache_coherent(old_domain))
			cpd_cache_clean();
    
		/*
		 * This should be made into a targeted TLB flush, keyed on
		 * cpd_is_domain_tlb_coherent(old_domain); for now flush everything.
		 */
		cpd_tlb_clean();
	}

	/* Update domains[] */
	domain_p->cpd_count++;
	cpd_stats_add(domain_p, cpd_stats + index);

	/* Update CPD */
	pmd_val(pmd) |= PMD_DOMAIN(domain_p->number); /* Tag CPD entry */
	cpu_set_pmd(cpd_p, pmd);
}
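The core of the CPD update is the domain tag: the incoming first-level entry is OR'd with PMD_DOMAIN(domain_p->number) before being written back. Below is a minimal sketch of that tagging step in isolation, assuming the ARM short-descriptor layout where the domain field occupies bits [8:5] and that PMD_DOMAIN_MASK covers exactly that field; retag_pmd_domain() is an illustrative helper, not part of the source above.

/* Sketch only: retag a first-level descriptor with a new domain number.
 * Assumes PMD_DOMAIN(n) builds the bits [8:5] field and PMD_DOMAIN_MASK
 * masks it, as in the surrounding examples. */
static inline pmd_t retag_pmd_domain(pmd_t pmd, unsigned int domain)
{
	pmd_val(pmd) &= ~PMD_DOMAIN_MASK;	/* clear any previous tag */
	pmd_val(pmd) |= PMD_DOMAIN(domain);	/* install the new domain */
	return pmd;
}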
Example #4
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
			PMD_DOMAIN(DOMAIN_IO) |
			(flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | prot;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
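The only non-obvious step in building the descriptor is the second line of super_pmd_val: for supersections, physical address bits [35:32] are carried in descriptor bits [23:20]. A standalone sketch of that packing is shown below, assuming PAGE_SHIFT is 12; make_supersection_base() is an illustrative helper, not a kernel function.

#include <stdint.h>

/* Illustrative helper only: pack a (possibly >4GB) page frame number into
 * the 32-bit supersection base encoding used above. */
static uint32_t make_supersection_base(uint64_t pfn)
{
	uint32_t base;

	base  = (uint32_t)(pfn << 12);                        /* physical base bits [31:20] */
	base |= ((uint32_t)(pfn >> (32 - 12)) & 0xf) << 20;   /* phys bits [35:32] -> [23:20] */
	return base;
}

Each 16MB supersection must then be replicated across 16 consecutive 1MB first-level entries, which is why the loop above writes pmd[0] and pmd[1] for eight successive pgd slots (each pgd slot covering PGDIR_SIZE, i.e. two 1MB entries).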
Example #5
/*
 * Add a PAGE mapping between VIRT and PHYS in domain
 * DOMAIN with protection PROT.  Note that due to the
 * way we map the PTEs, we must allocate two PTE_SIZE'd
 * blocks - one for the Linux pte table, and one for
 * the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, int domain, int prot)
{
	pmd_t *pmdp;
	pte_t *ptep;

	pmdp = pmd_offset(pgd_offset_k(virt), virt);

	if (pmd_none(*pmdp)) {
		pte_t *ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
						      sizeof(pte_t));

		ptep += PTRS_PER_PTE;

		set_pmd(pmdp, __mk_pmd(ptep, PMD_TYPE_TABLE | PMD_DOMAIN(domain)));
	}
	ptep = pte_offset(pmdp, virt);

	set_pte(ptep, mk_pte_phys(phys, __pgprot(prot)));
}
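For context, this helper is written to be called once per page while walking a region. The loop below is a hedged sketch of that kind of caller, in the spirit of the old ARM create_mapping() code but not a copy of it; map_region_with_pages() is an illustrative name.

/* Illustrative only: map [virt, virt + size) to [phys, ...) page by page. */
static void __init map_region_with_pages(unsigned long virt, unsigned long phys,
					 unsigned long size, int domain, int prot)
{
	unsigned long end = virt + size;

	while (virt < end) {
		alloc_init_page(virt, phys, domain, prot);
		virt += PAGE_SIZE;
		phys += PAGE_SIZE;
	}
}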
Example #6
/*
 * nonpaging_fill_spde
 * @spde: (shadow) page directory entry to fill
 * @pt: page table whose physical address is installed in the entry,
 *      tagged as a table descriptor in DOMAIN_GUEST_KERNEL
 */
static inline void nonpaging_fill_spde(u32 *spde, u32 *pt)
{
	u32 value = __pa(pt) | PMD_DOMAIN(DOMAIN_GUEST_KERNEL) | PMD_TYPE_TABLE;
	fill_spde(spde, value);
}
Example #7
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

#ifndef CONFIG_ARM_LPAE
		/*
		 * Modify the PTE pointer to have the correct domain.  This
		 * needs to be the vectors domain to avoid the low vectors
		 * being unmapped.
		 */
		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte + 0, init_pte[0], 0);
		set_pte_ext(new_pte + 1, init_pte[1], 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
	mm_dec_nr_pmds(mm);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}
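For context, pgd_alloc() is called from the generic mm code when a new address space is set up. The shape of that call site is roughly the following hedged sketch; example_mm_alloc_pgd() is an illustrative name, the real caller being the mm setup path in kernel/fork.c.

/* Sketch of the call-site shape only; not the kernel's exact code. */
static int example_mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}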