static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
	    REGION_ID(ea) != USER_REGION_ID &&
	    hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}
/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE.  If wrprot is true, it is permissible to
 * change the existing HPTE to read-only rather than removing it
 * (if we remove it we should clear the _PAGE_HPTEFLAGS bits).
 */
void hpte_update(struct mm_struct *mm, unsigned long addr,
		 unsigned long pte, int wrprot)
{
	int i;
	unsigned long context = 0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (REGION_ID(addr) == USER_REGION_ID)
		context = mm->context.id;
	i = batch->index;

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 */
	if (i != 0 && (context != batch->context ||
		       batch->large != pte_huge(pte))) {
		flush_tlb_pending();
		i = 0;
	}
	if (i == 0) {
		batch->context = context;
		batch->mm = mm;
		batch->large = pte_huge(pte);
	}
	batch->pte[i] = __pte(pte);
	batch->addr[i] = addr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		flush_tlb_pending();
}
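/*
 * A note on the pattern above: hpte_update() queues invalidations in a
 * per-CPU batch and flushes either when the batch fills or when the batch
 * context changes mid-stream. Below is a minimal, self-contained sketch of
 * that accumulate-and-flush idea; the names (flush_batch, batch_add,
 * batch_flush, BATCH_NR) are illustrative only, not kernel API.
 */
#include <stdio.h>

#define BATCH_NR 4

struct flush_batch {
	unsigned long addr[BATCH_NR];
	unsigned long context;
	int index;
};

/* Flush every queued address in one pass, then reset the batch. */
static void batch_flush(struct flush_batch *b)
{
	for (int i = 0; i < b->index; i++)
		printf("flush %#lx (context %lu)\n", b->addr[i], b->context);
	b->index = 0;
}

/* Queue one address; flush early if the context changed or the batch is full. */
static void batch_add(struct flush_batch *b, unsigned long context, unsigned long addr)
{
	if (b->index != 0 && b->context != context)
		batch_flush(b);
	if (b->index == 0)
		b->context = context;
	b->addr[b->index++] = addr;
	if (b->index >= BATCH_NR)
		batch_flush(b);
}

int main(void)
{
	struct flush_batch b = { .index = 0 };

	batch_add(&b, 1, 0x1000);
	batch_add(&b, 1, 0x2000);
	batch_add(&b, 2, 0x3000);	/* context change forces an early flush */
	batch_flush(&b);		/* drain the remainder */
	return 0;
}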
unsigned long eeh_token_to_phys(unsigned long token)
{
	if (REGION_ID(token) == EEH_REGION_ID) {
		unsigned long vaddr = IO_TOKEN_TO_ADDR(token);
		pte_t *ptep = find_linux_pte(ioremap_mm.pgd, vaddr);
		unsigned long pa = pte_pfn(*ptep) << PAGE_SHIFT;

		return pa | (vaddr & (PAGE_SIZE - 1));
	} else
		return token;
}
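/*
 * The return path above composes a physical address from the PTE's page
 * frame number and the in-page offset of the virtual address. The masking
 * arithmetic in isolation (a 4 KB page size and the EX_ prefix are assumed
 * purely for the example):
 */
#include <stdio.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	(1UL << EX_PAGE_SHIFT)

/* Shift the frame number into place and OR in the low-order offset bits. */
static unsigned long compose_phys(unsigned long pfn, unsigned long vaddr)
{
	return (pfn << EX_PAGE_SHIFT) | (vaddr & (EX_PAGE_SIZE - 1));
}

int main(void)
{
	/* frame 0x1234 plus the low 12 bits of the virtual address */
	printf("%#lx\n", compose_phys(0x1234, 0xdeadbeefUL));	/* prints 0x1234eef */
	return 0;
}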
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
	u64 vsid, vsidkey;
	int psize, ssize;

	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
		if (mm == NULL)
			return 1;
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_user_vsid(&mm->context, ea, ssize);
		vsidkey = SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
		psize = mmu_linear_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	default:
		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
		return 1;
	}
	/* Bad address */
	if (!vsid)
		return 1;

	vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;

	vsid |= mmu_psize_defs[psize].sllp |
		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);

	slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
	slb->vsid = vsid;

	return 0;
}
/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE.  If wrprot is true, it is permissible to
 * change the existing HPTE to read-only rather than removing it
 * (if we remove it we should clear the _PAGE_HPTEFLAGS bits).
 */
void hpte_update(pte_t *ptep, unsigned long pte, int wrprot)
{
	struct page *ptepage;
	struct mm_struct *mm;
	unsigned long addr;
	int i;
	unsigned long context = 0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	ptepage = virt_to_page(ptep);
	mm = (struct mm_struct *) ptepage->mapping;
	addr = ptepage->index +
		(((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE);
	if (REGION_ID(addr) == USER_REGION_ID)
		context = mm->context.id;
	i = batch->index;

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 */
	if (unlikely(i != 0 && context != batch->context)) {
		flush_tlb_pending();
		i = 0;
	}
	if (i == 0) {
		batch->context = context;
		batch->mm = mm;
	}
	batch->pte[i] = __pte(pte);
	batch->addr[i] = addr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		flush_tlb_pending();
}
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
	if (in_hugepage_area(mm->context, ea))
		vsid |= SLB_VSID_L;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}
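/*
 * The SLB write above goes into whichever slot spu->slb_replace points at,
 * and that pointer then advances round-robin through the eight SPU SLB
 * slots. A small standalone sketch of that victim-selection policy (struct
 * and function names are illustrative, not the SPU interface):
 */
#include <stdio.h>

#define NR_SLOTS 8

struct slb_cache {
	unsigned long esid[NR_SLOTS];
	unsigned long vsid[NR_SLOTS];
	unsigned int next;	/* next victim slot, wraps around */
};

/* Install a translation in the current victim slot, then advance the pointer. */
static void slb_install(struct slb_cache *c, unsigned long esid, unsigned long vsid)
{
	c->esid[c->next] = esid;
	c->vsid[c->next] = vsid;
	c->next = (c->next + 1) % NR_SLOTS;
}

int main(void)
{
	struct slb_cache c = { .next = 0 };

	for (int i = 0; i < 10; i++)
		slb_install(&c, 0x100UL + i, 0x200UL + i);
	printf("next victim slot: %u\n", c.next);	/* 10 mod 8 == 2 */
	return 0;
}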
/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;
	int psize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
		return 1;
	}

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (! mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		vsid = get_vsid(mm->context.id, ea);
		psize = mm->context.user_psize;
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

	/* Handle hugepage regions */
	if (unlikely(in_hugepage_area(mm->context, ea))) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifndef CONFIG_PPC_64K_PAGES
	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_ci_restrictions) {
		/* If this PTE is non-cacheable, switch to 4k */
		if (psize == MMU_PAGE_64K &&
		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
			if (user_region) {
				psize = MMU_PAGE_4K;
				mm->context.user_psize = MMU_PAGE_4K;
				mm->context.sllp = SLB_VSID_USER |
					mmu_psize_defs[MMU_PAGE_4K].sllp;
			} else if (ea < VMALLOC_END) {
				/*
				 * some driver did a non-cacheable mapping
				 * in vmalloc space, so switch vmalloc
				 * to 4k pages
				 */
				printk(KERN_ALERT "Reducing vmalloc segment "
				       "to 4kB pages because of "
				       "non-cacheable mapping\n");
				psize = mmu_vmalloc_psize = MMU_PAGE_4K;
			}
		}
		if (user_region) {
			if (psize != get_paca()->context.user_psize) {
				get_paca()->context = mm->context;
				slb_flush_and_rebolt();
			}
		} else if (get_paca()->vmalloc_sllp !=
			   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
			get_paca()->vmalloc_sllp =
				mmu_psize_defs[mmu_vmalloc_psize].sllp;
			slb_flush_and_rebolt();
		}
	}
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
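/*
 * The fast-path permission check in hash_page() is a bitmask subset test:
 * the fault is only serviced here when every bit required by 'access' is
 * already set in the Linux PTE, i.e. (access & ~pte) == 0. A tiny
 * standalone illustration (the EX_ flag values are made up for the example):
 */
#include <stdio.h>

#define EX_PRESENT	0x1UL
#define EX_RW		0x2UL
#define EX_USER		0x4UL

/* Return 1 when all bits in 'required' are present in 'pte_flags'. */
static int access_ok_fastpath(unsigned long required, unsigned long pte_flags)
{
	return (required & ~pte_flags) == 0;
}

int main(void)
{
	unsigned long pte = EX_PRESENT | EX_USER;	/* read-only user mapping */

	printf("read:  %d\n", access_ok_fastpath(EX_PRESENT | EX_USER, pte));	/* 1 */
	printf("write: %d\n", access_ok_fastpath(EX_PRESENT | EX_RW, pte));	/* 0 */
	return 0;
}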