Example #1
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 * 
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;
#endif
	unsigned long pfn = pte_pfn(pte);

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
		/* On 8xx, cache control instructions (particularly
		 * "dcbst" from flush_dcache_icache) fault as write
		 * operation if there is an unpopulated TLB entry
		 * for the address in question. To workaround that,
		 * we invalidate the TLB here, thus avoiding dcbst
		 * misbehaviour.
		 */
		_tlbie(address);
#endif
		if (!PageReserved(page)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}
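For context, a minimal sketch of the calling pattern the header comment describes: the generic fault code writes the linux PTE and then invokes update_mmu_cache() so the HPTE can be preloaded. This is not the actual mm/memory.c code; the helper name and its locals are assumptions for illustration only.

/*
 * Illustrative stand-in for the tail of the generic fault path (assumed
 * helper, not kernel source): install the linux PTE, then call the
 * architecture hook above while the pte lock is still held.
 */
static inline void example_finish_fault(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *ptep, pte_t entry)
{
	set_pte_at(vma->vm_mm, address, ptep, entry);	/* update linux PTE */
	update_mmu_cache(vma, address, entry);		/* preload the HPTE */
}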
Example #2
static void __init
ebony_early_serial_map(void)
{
	struct uart_port port;

	/* Setup ioremapped serial port access */
	memset(&port, 0, sizeof(port));
	port.membase = ioremap64(PPC440GP_UART0_ADDR, 8);
	port.irq = 0;
	port.uartclk = clocks.uart0;
	port.regshift = 0;
	port.iotype = SERIAL_IO_MEM;
	port.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
	port.line = 0;

#ifdef CONFIG_SERIAL_8250
	if (early_serial_setup(&port) != 0)
		printk("Early serial init of port 0 failed\n");
#endif

#ifdef CONFIG_SERIAL_TEXT_DEBUG
	/* Configure debug serial access */
	gen550_init(0, &port);
#endif
#ifdef CONFIG_KGDB_8250
	kgdb8250_add_port(0, &port);
#endif

#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB_8250)
	/* Purge TLB entry added in head_44x.S for early serial access */
	_tlbie(UART0_IO_BASE);
#endif

	port.membase = ioremap64(PPC440GP_UART1_ADDR, 8);
	port.irq = 1;
	port.uartclk = clocks.uart1;
	port.line = 1;

#ifdef CONFIG_SERIAL_8250
	if (early_serial_setup(&port) != 0)
		printk("Early serial init of port 1 failed\n");
#endif

#ifdef CONFIG_SERIAL_TEXT_DEBUG
	/* Configure debug serial access */
	gen550_init(1, &port);
#endif
#ifdef CONFIG_KGDB_8250
	kgdb8250_add_port(1, &port);
#endif
}
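A hedged sketch of where this typically runs: board code calls the early serial map from its setup_arch hook during platform bring-up, so the 8250 console, text-debug and KGDB paths are usable before the normal serial driver probes. The surrounding function below is an assumption for illustration, not verbatim ebony.c.

/*
 * Illustrative caller (assumed, simplified): hook the early serial map
 * into the board's setup_arch path.
 */
static void __init example_ebony_setup_arch(void)
{
	/* ... clock and board init would normally come first ... */
	ebony_early_serial_map();
}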
Example #3
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;
	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	/* two-argument form as in powerpc pgtable.c; older kernels used
	 * pte_alloc_kernel(&init_mm, pd, va) */
	pg = pte_alloc_kernel(pd, va);

	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (unlikely(mem_init_done))
			_tlbie(va);
	}
	return err;
}
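A hedged usage sketch: establishing a single uncached kernel mapping for a block of device registers with map_page(). The address constants and the exact flag combination are assumptions for illustration; real callers such as the ioremap() path compute them per platform.

/*
 * Illustrative only: the virtual and physical addresses below are made up,
 * and the flag bits are an assumed but typical choice for memory-mapped I/O
 * (_PAGE_NO_CACHE | _PAGE_GUARDED on top of the kernel page protection).
 */
static int __init example_map_device_page(void)
{
	unsigned long va = 0xfdff0000UL;	/* assumed fixed kernel VA */
	phys_addr_t pa = 0xef600000UL;		/* assumed device physical */

	return map_page(va, pa, _PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED);
}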
Example #4
static void __init
ocotea_early_serial_map(void)
{
	struct uart_port port;

	/* Setup ioremapped serial port access */
	memset(&port, 0, sizeof(port));
	port.membase = ioremap64(PPC440GX_UART0_ADDR, 8);
	port.irq = UART0_INT;
	port.uartclk = clocks.uart0;
	port.regshift = 0;
	port.iotype = UPIO_MEM;
	port.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST;
	port.line = 0;

	if (early_serial_setup(&port) != 0) {
		printk("Early serial init of port 0 failed\n");
	}

#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
	/* Configure debug serial access */
	gen550_init(0, &port);

	/* Purge TLB entry added in head_44x.S for early serial access */
	_tlbie(UART0_IO_BASE, 0);
#endif

	port.membase = ioremap64(PPC440GX_UART1_ADDR, 8);
	port.irq = UART1_INT;
	port.uartclk = clocks.uart1;
	port.line = 1;

	if (early_serial_setup(&port) != 0) {
		printk("Early serial init of port 1 failed\n");
	}

#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB)
	/* Configure debug serial access */
	gen550_init(1, &port);
#endif
}
Example #5
void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct mm_struct *mm;
	pmd_t *pmd;
	pte_t *pte;

	/* No hash table (e.g. 603-class CPUs): a tlbie is all that's needed */
	if (Hash == 0) {
		_tlbie(vmaddr);
		return;
	}
	/* Kernel addresses belong to init_mm, user addresses to the vma's mm */
	mm = (vmaddr < TASK_SIZE) ? vma->vm_mm : &init_mm;
	pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
	if (!pmd_none(*pmd)) {
		pte = pte_offset(pmd, vmaddr);
		/* Only pages that ever got a hash-table entry need flushing */
		if (pte_val(*pte) & _PAGE_HASHPTE)
			flush_hash_page(mm->context, vmaddr, pte);
	}
#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif
}
Example #6
/*
 * Called by ptep_set_access_flags, must flush on CPUs for which the
 * DSI handler can't just "fixup" the TLB on a write fault
 */
void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
{
	if (Hash != 0)
		return;
	_tlbie(addr);
}
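The header comment names the caller; below is a hedged sketch of that pattern (the in-tree ptep_set_access_flags() is an arch-specific macro whose exact body differs): widen the PTE's access bits first, then flush so CPUs without a hash-fixup DSI path drop the stale translation.

/*
 * Illustrative sketch only, not the kernel's ptep_set_access_flags():
 * the point is the ordering -- update the linux PTE, then call
 * flush_tlb_page_nohash() so the old, more restrictive TLB entry goes
 * away on CPUs whose DSI handler cannot fix it up on a write fault.
 */
static inline void example_set_access_flags(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep, pte_t entry)
{
	set_pte_at(vma->vm_mm, address, ptep, entry);	/* new access bits  */
	flush_tlb_page_nohash(vma, address);		/* drop stale entry */
}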