void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
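The _ALIGN_DOWN/_ALIGN_UP calls at the top just widen the range to whole pages before the walk. A minimal userspace sketch of the same mask arithmetic and loop bounds, assuming a 4 KB page size (ALIGN_DOWN/ALIGN_UP here are local stand-ins, not the kernel macros):

#include <stdio.h>

#define PAGE_SIZE	4096UL
/* Local stand-ins for the kernel's _ALIGN_DOWN/_ALIGN_UP. */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long start = 0x10000123UL;
	unsigned long end   = 0x10002001UL;

	start = ALIGN_DOWN(start, PAGE_SIZE);	/* 0x10000000 */
	end   = ALIGN_UP(end, PAGE_SIZE);	/* 0x10003000 */

	/* The flush loop above then visits every page in [start, end). */
	for (; start < end; start += PAGE_SIZE)
		printf("page at 0x%lx\n", start);
	return 0;
}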
Example #2
struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
{
	struct iowa_bus *bus;
	int token;

	token = PCI_GET_ADDR_TOKEN(addr);

	if (token && token <= iowa_bus_count) {
		/* Fast path: the address carries a 1-based bus token. */
		bus = &iowa_busses[token - 1];
	} else {
		unsigned long vaddr, paddr;
		pte_t *ptep;

		vaddr = (unsigned long)PCI_FIX_ADDR(addr);
		if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
			return NULL;

		/* Slow path: walk the kernel page tables to recover the
		 * physical address and look the bus up by range. */
		ptep = find_linux_pte(init_mm.pgd, vaddr);
		if (ptep == NULL)
			paddr = 0;
		else
			paddr = pte_pfn(*ptep) << PAGE_SHIFT;
		bus = iowa_pci_find(vaddr, paddr);

		if (bus == NULL)
			return NULL;
	}

	return bus;
}
Example #3
unsigned long eeh_token_to_phys(unsigned long token)
{
	if (REGION_ID(token) == EEH_REGION_ID) {
		unsigned long vaddr = IO_TOKEN_TO_ADDR(token);
		pte_t *ptep = find_linux_pte(ioremap_mm.pgd, vaddr);
		unsigned long pa;

		/* No PTE for this address: return the token unchanged
		 * instead of dereferencing a NULL pointer. */
		if (ptep == NULL)
			return token;
		pa = pte_pfn(*ptep) << PAGE_SHIFT;
		return pa | (vaddr & (PAGE_SIZE-1));
	} else
		return token;
}
Example #4
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;

	/* We don't want huge pages prefaulted for now
	 */
	if (unlikely(in_hugepage_area(mm->context, ea)))
		return;

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx\n", mm, mm->pgd, ea, access, trap);

	/* Get PTE, VSID, access mask */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;
	vsid = get_vsid(mm->context.id, ea);

	/* Hash it in */
	local_irq_save(flags);
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;
#ifndef CONFIG_PPC_64K_PAGES
	__hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_ci_restrictions) {
		/* If this PTE is non-cacheable, switch to 4k */
		if (mm->context.user_psize == MMU_PAGE_64K &&
		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
			mm->context.user_psize = MMU_PAGE_4K;
			mm->context.sllp = SLB_VSID_USER |
				mmu_psize_defs[MMU_PAGE_4K].sllp;
			get_paca()->context = mm->context;
			slb_flush_and_rebolt();
		}
	}
	if (mm->context.user_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		__hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */
	local_irq_restore(flags);
}
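The local-flush decision above hinges on one question: has this mm ever been active on any CPU other than the current one? If cpu_vm_mask equals the single-CPU mask for this processor, the hash entry can be invalidated locally instead of being broadcast. A rough userspace illustration of that test, using a plain bitmask in place of cpumask_t (names and types here are illustrative, not kernel API):

#include <stdio.h>

/* Bit N set means the address space has run on CPU N. */
static int can_flush_locally(unsigned long cpu_vm_mask, int this_cpu)
{
	return cpu_vm_mask == (1UL << this_cpu);
}

int main(void)
{
	printf("%d\n", can_flush_locally(1UL << 2, 2));			/* 1: local is enough */
	printf("%d\n", can_flush_locally((1UL << 2) | (1UL << 5), 2));	/* 0: must broadcast */
	return 0;
}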
Example #5
/**
 * eeh_token_to_phys - convert EEH address token to phys address
 * @token: i/o token, should be an address in the form 0xA....
 */
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
	pte_t *ptep;
	unsigned long pa;

	ptep = find_linux_pte(init_mm.pgd, token);
	if (!ptep)
		return token;
	pa = pte_pfn(*ptep) << PAGE_SHIFT;

	return pa | (token & (PAGE_SIZE-1));
}
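Both eeh_token_to_phys() variants perform the same translation: take the page frame number out of the PTE, shift it into a physical page address, and OR back in the offset within the page. A minimal sketch of that arithmetic, assuming 4 KB pages and taking the pfn as a plain number instead of reading a real PTE:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Combine a page frame number with vaddr's offset inside its page. */
static unsigned long pfn_vaddr_to_phys(unsigned long pfn, unsigned long vaddr)
{
	return (pfn << PAGE_SHIFT) | (vaddr & (PAGE_SIZE - 1));
}

int main(void)
{
	/* Hypothetical mapping: some virtual page backed by frame 0x12345. */
	printf("0x%lx\n", pfn_vaddr_to_phys(0x12345UL, 0xd000000000001234UL));	/* 0x12345234 */
	return 0;
}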
Example #6
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
{
	pgd_t *pgdir;
	pte_t *ptep, pte;
	int pagesize;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	pagesize = get_slice_psize(current->mm, addr);

	/* align address to page boundary */
	offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1);
	addr -= offset;

	if (is_huge_psize(pagesize))
		ptep = huge_pte_offset(current->mm, addr);
	else
		ptep = find_linux_pte(pgdir, addr);

	if (ptep == NULL)
		return -EFAULT;
	pte = *ptep;
	if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
		return -EFAULT;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		return -EFAULT;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(ret, kaddr + offset, nb);
	return 0;
}
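Unlike the fixed PAGE_SIZE masking in the earlier examples, the alignment step here derives the mask from the slice's page-size shift, so the same code handles 4 KB and 64 KB pages. A short sketch of that computation (the 16-bit shift standing in for a 64 KB page is just an example value):

#include <stdio.h>

int main(void)
{
	unsigned long addr   = 0x3fff80012345UL;
	unsigned int  shift  = 16;				/* e.g. a 64 KB page */
	unsigned long offset = addr & ((1UL << shift) - 1);	/* offset inside the page */

	addr -= offset;						/* start of the enclosing page */
	printf("base = 0x%lx, offset = 0x%lx\n", addr, offset);
	return 0;
}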
Example #7
/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;
	int psize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
 		return 1;
	}

	/* Get region & vsid */
 	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (! mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		vsid = get_vsid(mm->context.id, ea);
		psize = mm->context.user_psize;
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault 
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

	/* Handle hugepage regions */
	if (unlikely(in_hugepage_area(mm->context, ea))) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX, but this pre-check is a fast path)
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifndef CONFIG_PPC_64K_PAGES
	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_ci_restrictions) {
		/* If this PTE is non-cacheable, switch to 4k */
		if (psize == MMU_PAGE_64K &&
		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
			if (user_region) {
				psize = MMU_PAGE_4K;
				mm->context.user_psize = MMU_PAGE_4K;
				mm->context.sllp = SLB_VSID_USER |
					mmu_psize_defs[MMU_PAGE_4K].sllp;
			} else if (ea < VMALLOC_END) {
				/*
				 * some driver did a non-cacheable mapping
				 * in vmalloc space, so switch vmalloc
				 * to 4k pages
				 */
				printk(KERN_ALERT "Reducing vmalloc segment "
				       "to 4kB pages because of "
				       "non-cacheable mapping\n");
				psize = mmu_vmalloc_psize = MMU_PAGE_4K;
			}
		}
		if (user_region) {
			if (psize != get_paca()->context.user_psize) {
				get_paca()->context = mm->context;
				slb_flush_and_rebolt();
			}
		} else if (get_paca()->vmalloc_sllp !=
			   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
			get_paca()->vmalloc_sllp =
				mmu_psize_defs[mmu_vmalloc_psize].sllp;
			slb_flush_and_rebolt();
		}
	}
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
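The permission pre-check in hash_page() relies on a simple bit identity: if any bit required by access is clear in the PTE, access & ~pte is non-zero and the fault is handed back to the generic handler. A tiny sketch of that test with made-up permission bits (the values below are illustrative, not the real powerpc _PAGE_* definitions):

#include <stdio.h>

#define P_READ	0x1UL	/* illustrative permission bits */
#define P_WRITE	0x2UL
#define P_EXEC	0x4UL

/* Non-zero iff at least one required bit is missing from granted. */
static unsigned long access_denied(unsigned long required, unsigned long granted)
{
	return required & ~granted;
}

int main(void)
{
	unsigned long pte = P_READ | P_EXEC;		/* read + exec, no write */

	printf("%lu\n", access_denied(P_READ, pte));	/* 0: allowed */
	printf("%lu\n", access_denied(P_WRITE, pte));	/* 2: denied  */
	return 0;
}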