/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE. If wrprot is true, it is permissible to
 * change the existing HPTE to read-only rather than removing it
 * (if we remove it we should clear the _PAGE_HPTEFLAGS bits).
 */
void hpte_update(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller) */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = mmu_huge_psize;
#else
		BUG();
		psize = pte_pagesize_index(pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(pte);

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
		flush_tlb_pending();
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
	}
	if (!is_kernel_addr(addr)) {
		vsid = get_vsid(mm->context.id, addr);
		WARN_ON(vsid == 0);
	} else
		vsid = get_kernel_vsid(addr);
	batch->vaddr[i] = (vsid << 28) | (addr & 0x0fffffff);
	batch->pte[i] = __real_pte(__pte(pte), ptep);
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		flush_tlb_pending();
}
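The batch entry above packs the VSID and the in-segment offset into a single word. A minimal, self-contained sketch of that packing (illustrative only; the shift of 28 and the 0x0fffffff mask come straight from the code above, everything else is made up for the example):

#include <stdio.h>

/* 256MB segments: 28-bit offset within the segment, VSID above it. */
#define SID_SHIFT	28
#define SID_MASK	0x0fffffffUL

static unsigned long pack_batch_vaddr(unsigned long vsid, unsigned long addr)
{
	/* Same expression as batch->vaddr[i] above: the segment bits of
	 * the effective address are replaced by the VSID, the offset
	 * within the segment is kept. */
	return (vsid << SID_SHIFT) | (addr & SID_MASK);
}

int main(void)
{
	unsigned long vsid = 0x123456UL;	/* hypothetical VSID */
	unsigned long addr = 0x10002000UL;	/* hypothetical user address */

	printf("vaddr = 0x%lx\n", pack_batch_vaddr(vsid, addr));
	return 0;
}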
/*
 * A Linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/* Mask the address for the standard page size. If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table. */
		addr &= PAGE_MASK;
	}

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	WARN_ON(vsid == 0);
	vpn = hpt_vpn(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case, might be worth testing the mm cpu mask though
	 * and decide to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vpn, rpte, psize, ssize, 0);
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}
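For context, the batch that fills up here is drained by __flush_tlb_pending(). A simplified sketch of that consumer (not the exact kernel function; the real one also chooses between local and global invalidation from the mm's CPU mask, which is elided here):

/*
 * Sketch of the batch consumer: a single-entry batch takes the cheaper
 * per-page path, otherwise the whole range is flushed in one call.
 */
static void flush_tlb_pending_sketch(struct ppc64_tlb_batch *batch)
{
	int i = batch->index;

	if (i == 1)
		flush_hash_page(batch->vpn[0], batch->pte[0],
				batch->psize, batch->ssize, 0 /* global */);
	else
		flush_hash_range(i, 0 /* global */);
	batch->index = 0;
}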
int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
		   pte_t *ptep, unsigned long trap, unsigned long flags,
		   int ssize, int subpg_prot)
{
	real_pte_t rpte;
	unsigned long *hidxp;
	unsigned long hpte_group;
	unsigned int subpg_index;
	unsigned long rflags, pa, hidx;
	unsigned long old_pte, new_pte, subpg_pte;
	unsigned long vpn, hash, slot;
	unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift;

	/*
	 * atomically mark the linux large page PTE busy and dirty
	 */
	do {
		pte_t pte = READ_ONCE(*ptep);

		old_pte = pte_val(pte);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;
		/*
		 * Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access. Since this is a 4K insert of a 64K page
		 * size, also add H_PAGE_COMBO
		 */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED | H_PAGE_COMBO;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	/*
	 * Handle the subpage protection bits
	 */
	subpg_pte = new_pte & ~subpg_prot;
	rflags = htab_convert_pte_flags(subpg_pte);

	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
		/*
		 * No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case
		 */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
	}

	subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
	vpn = hpt_vpn(ea, vsid, ssize);
	rpte = __real_pte(__pte(old_pte), ptep);
	/*
	 * None of the sub-4K pages are hashed
	 */
	if (!(old_pte & H_PAGE_HASHPTE))
		goto htab_insert_hpte;
	/*
	 * Check if the pte was already inserted into the hash table
	 * as a 64k HW page, and invalidate the 64k HPTE if so.
	 */
	if (!(old_pte & H_PAGE_COMBO)) {
		flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
		/*
		 * clear the old slot details from the old and new pte.
		 * On hash insert failure we use the old pte value and we
		 * don't want slot information there if we have an insert
		 * failure.
		 */
		old_pte &= ~(H_PAGE_HASHPTE | H_PAGE_F_GIX | H_PAGE_F_SECOND);
		new_pte &= ~(H_PAGE_HASHPTE | H_PAGE_F_GIX | H_PAGE_F_SECOND);
		goto htab_insert_hpte;
	}
	/*
	 * Check for sub page valid and update
	 */
	if (__rpte_sub_valid(rpte, subpg_index)) {
		int ret;

		hash = hpt_hash(vpn, shift, ssize);
		hidx = __rpte_to_hidx(rpte, subpg_index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn,
						 MMU_PAGE_4K, MMU_PAGE_4K,
						 ssize, flags);
		/*
		 * If we failed, typically because the HPTE wasn't really
		 * here, we try an insertion.
		 */
		if (ret == -1)
			goto htab_insert_hpte;

		*ptep = __pte(new_pte & ~H_PAGE_BUSY);
		return 0;
	}

htab_insert_hpte:
	/*
	 * handle H_PAGE_4K_PFN case
	 */
	if (old_pte & H_PAGE_4K_PFN) {
		/*
		 * All the sub-4K pages have the same
		 * physical address.
		 */
		pa = pte_pfn(__pte(old_pte)) << HW_PAGE_SHIFT;
	} else {
		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
		pa += (subpg_index << shift);
	}
	hash = hpt_hash(vpn, shift, ssize);
repeat:
	hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

	/* Insert into the hash table, primary slot */
	slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
					MMU_PAGE_4K, MMU_PAGE_4K, ssize);
	/*
	 * Primary is full, try the secondary
	 */
	if (unlikely(slot == -1)) {
		hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
						rflags, HPTE_V_SECONDARY,
						MMU_PAGE_4K, MMU_PAGE_4K,
						ssize);
		if (slot == -1) {
			if (mftb() & 0x1)
				hpte_group = ((hash & htab_hash_mask) *
					      HPTES_PER_GROUP) & ~0x7UL;
			mmu_hash_ops.hpte_remove(hpte_group);
			/*
			 * FIXME!! Should we try the group from which we
			 * removed?
			 */
			goto repeat;
		}
	}
	/*
	 * Hypervisor failure. Restore old pte and return -1
	 * similar to __hash_page_*
	 */
	if (unlikely(slot == -2)) {
		*ptep = __pte(old_pte);
		hash_failure_debug(ea, access, vsid, trap, ssize,
				   MMU_PAGE_4K, MMU_PAGE_4K, old_pte);
		return -1;
	}
	/*
	 * Insert slot number & secondary bit in PTE second half,
	 * clear H_PAGE_BUSY and set appropriate HPTE slot bit.
	 * Since we have H_PAGE_BUSY set on ptep, we can be sure
	 * nobody is updating hidx.
	 */
	hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
	rpte.hidx &= ~(0xfUL << (subpg_index << 2));
	*hidxp = rpte.hidx | (slot << (subpg_index << 2));
	new_pte = mark_subptegroup_valid(new_pte, subpg_index);
	new_pte |= H_PAGE_HASHPTE;
	/*
	 * check __real_pte for details on matching smp_rmb()
	 */
	smp_wmb();
	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}
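The PTE-second-half update at the end is worth unpacking: each 4K subpage of the 64K Linux page gets a 4-bit slot nibble in the hidx word, so sixteen subpages fit in one unsigned long. A small sketch of the packing arithmetic (the helper names are mine; the shift and mask expressions are exactly the ones used above):

/* 4 bits of hidx per 4K subpage; subpg_index selects the nibble. */
static unsigned long hidx_store(unsigned long hidx,
				unsigned int subpg_index,
				unsigned long slot)
{
	unsigned int shift = subpg_index << 2;		/* nibble offset */

	hidx &= ~(0xfUL << shift);			/* clear old nibble */
	return hidx | ((slot & 0xfUL) << shift);	/* store new slot */
}

static unsigned long hidx_load(unsigned long hidx, unsigned int subpg_index)
{
	/* roughly what __rpte_to_hidx() does for one subpage */
	return (hidx >> (subpg_index << 2)) & 0xfUL;
}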
int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
		   pte_t *ptep, unsigned long trap, unsigned long flags,
		   int ssize, int subpg_prot)
{
	real_pte_t rpte;
	unsigned long hpte_group;
	unsigned int subpg_index;
	unsigned long rflags, pa;
	unsigned long old_pte, new_pte, subpg_pte;
	unsigned long vpn, hash, slot, gslot;
	unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift;

	/*
	 * atomically mark the linux large page PTE busy and dirty
	 */
	do {
		pte_t pte = READ_ONCE(*ptep);

		old_pte = pte_val(pte);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;
		/*
		 * Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access. Since this is a 4K insert of a 64K page
		 * size, also add H_PAGE_COMBO
		 */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED | H_PAGE_COMBO;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	/*
	 * Handle the subpage protection bits
	 */
	subpg_pte = new_pte & ~subpg_prot;
	rflags = htab_convert_pte_flags(subpg_pte);

	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
		/*
		 * No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case
		 */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
	}

	subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
	vpn = hpt_vpn(ea, vsid, ssize);
	rpte = __real_pte(__pte(old_pte), ptep);
	/*
	 * None of the sub-4K pages are hashed
	 */
	if (!(old_pte & H_PAGE_HASHPTE))
		goto htab_insert_hpte;
	/*
	 * Check if the pte was already inserted into the hash table
	 * as a 64k HW page, and invalidate the 64k HPTE if so.
	 */
	if (!(old_pte & H_PAGE_COMBO)) {
		flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
		/*
		 * clear the old slot details from the old and new pte.
		 * On hash insert failure we use the old pte value and we
		 * don't want slot information there if we have an insert
		 * failure.
		 */
		old_pte &= ~H_PAGE_HASHPTE;
		new_pte &= ~H_PAGE_HASHPTE;
		goto htab_insert_hpte;
	}
	/*
	 * Check for sub page valid and update
	 */
	if (__rpte_sub_valid(rpte, subpg_index)) {
		int ret;

		gslot = pte_get_hash_gslot(vpn, shift, ssize, rpte,
					   subpg_index);
		ret = mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn,
						 MMU_PAGE_4K, MMU_PAGE_4K,
						 ssize, flags);
		/*
		 * If we failed, typically because the HPTE wasn't really
		 * here, we try an insertion.
		 */
		if (ret == -1)
			goto htab_insert_hpte;

		*ptep = __pte(new_pte & ~H_PAGE_BUSY);
		return 0;
	}

htab_insert_hpte:

	/*
	 * Initialize all hidx entries to an invalid value, the first time
	 * the PTE is about to allocate a 4K HPTE.
	 */
	if (!(old_pte & H_PAGE_COMBO))
		rpte.hidx = INVALID_RPTE_HIDX;

	/*
	 * handle H_PAGE_4K_PFN case
	 */
	if (old_pte & H_PAGE_4K_PFN) {
		/*
		 * All the sub-4K pages have the same
		 * physical address.
		 */
		pa = pte_pfn(__pte(old_pte)) << HW_PAGE_SHIFT;
	} else {
		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
		pa += (subpg_index << shift);
	}
	hash = hpt_hash(vpn, shift, ssize);
repeat:
	hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

	/* Insert into the hash table, primary slot */
	slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
					MMU_PAGE_4K, MMU_PAGE_4K, ssize);
	/*
	 * Primary is full, try the secondary
	 */
	if (unlikely(slot == -1)) {
		bool soft_invalid;

		hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
						rflags, HPTE_V_SECONDARY,
						MMU_PAGE_4K, MMU_PAGE_4K,
						ssize);

		soft_invalid = hpte_soft_invalid(slot);
		if (unlikely(soft_invalid)) {
			/*
			 * We got a valid slot from a hardware point of
			 * view, but we cannot use it, because we use this
			 * special value, as defined by hpte_soft_invalid(),
			 * to track invalid slots. So invalidate it.
			 */
			gslot = slot & _PTEIDX_GROUP_IX;
			mmu_hash_ops.hpte_invalidate(hpte_group + gslot, vpn,
						     MMU_PAGE_4K, MMU_PAGE_4K,
						     ssize, 0);
		}

		if (unlikely(slot == -1 || soft_invalid)) {
			/*
			 * For a soft-invalid slot, let's ensure that we
			 * release a slot from the primary, with the hope
			 * that we will acquire that slot next time we try.
			 * This will ensure that we do not get the same
			 * soft-invalid slot.
			 */
			if (soft_invalid || (mftb() & 0x1))
				hpte_group = ((hash & htab_hash_mask) *
					      HPTES_PER_GROUP) & ~0x7UL;

			mmu_hash_ops.hpte_remove(hpte_group);
			/*
			 * FIXME!! Should we try the group from which we
			 * removed?
			 */
			goto repeat;
		}
	}
	/*
	 * Hypervisor failure. Restore old pte and return -1
	 * similar to __hash_page_*
	 */
	if (unlikely(slot == -2)) {
		*ptep = __pte(old_pte);
		hash_failure_debug(ea, access, vsid, trap, ssize,
				   MMU_PAGE_4K, MMU_PAGE_4K, old_pte);
		return -1;
	}

	new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot);
	new_pte |= H_PAGE_HASHPTE;

	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}
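The soft-invalid dance above exists because one of the sixteen 4-bit hidx encodings is reserved to mean "no HPTE here"; if the hardware insert happens to hand back exactly that slot, it cannot be recorded in the PTE and must be invalidated and retried. A conceptual, self-contained sketch of the test (the marker value is an assumption for illustration; the kernel's hpte_soft_invalid() defines the real encoding):

#include <stdbool.h>

#define SOFT_INVALID_NIBBLE	0xfUL	/* assumed reserved encoding */

static bool slot_is_soft_invalid(long slot)
{
	/* Secondary bit plus group index together form the 4-bit nibble
	 * stored in the PTE; a negative slot means the insert failed. */
	return slot >= 0 && (slot & 0xfUL) == SOFT_INVALID_NIBBLE;
}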
int __hash_page_64K(unsigned long ea, unsigned long access,
		    unsigned long vsid, pte_t *ptep, unsigned long trap,
		    unsigned long flags, int ssize)
{
	real_pte_t rpte;
	unsigned long hpte_group;
	unsigned long rflags, pa;
	unsigned long old_pte, new_pte;
	unsigned long vpn, hash, slot;
	unsigned long shift = mmu_psize_defs[MMU_PAGE_64K].shift;

	/*
	 * atomically mark the linux large page PTE busy and dirty
	 */
	do {
		pte_t pte = READ_ONCE(*ptep);

		old_pte = pte_val(pte);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;
		/*
		 * Check if PTE has the cache-inhibit bit set.
		 * If so, bail out and refault as a 4k page
		 */
		if (!mmu_has_feature(MMU_FTR_CI_LARGE_PAGE) &&
		    unlikely(pte_ci(pte)))
			return 0;
		/*
		 * Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access.
		 */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	rflags = htab_convert_pte_flags(new_pte);
	rpte = __real_pte(__pte(old_pte), ptep);

	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

	vpn = hpt_vpn(ea, vsid, ssize);
	if (unlikely(old_pte & H_PAGE_HASHPTE)) {
		unsigned long gslot;

		/*
		 * There MIGHT be an HPTE for this pte
		 */
		gslot = pte_get_hash_gslot(vpn, shift, ssize, rpte, 0);
		if (mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn, MMU_PAGE_64K,
					       MMU_PAGE_64K, ssize,
					       flags) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & H_PAGE_HASHPTE))) {

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
		hash = hpt_hash(vpn, shift, ssize);

repeat:
		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

		/* Insert into the hash table, primary slot */
		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
						MMU_PAGE_64K, MMU_PAGE_64K,
						ssize);
		/*
		 * Primary is full, try the secondary
		 */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
							rflags,
							HPTE_V_SECONDARY,
							MMU_PAGE_64K,
							MMU_PAGE_64K, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;
				mmu_hash_ops.hpte_remove(hpte_group);
				/*
				 * FIXME!! Should we try the group from
				 * which we removed?
				 */
				goto repeat;
			}
		}
		/*
		 * Hypervisor failure. Restore old pte and return -1
		 * similar to __hash_page_*
		 */
		if (unlikely(slot == -2)) {
			*ptep = __pte(old_pte);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   MMU_PAGE_64K, MMU_PAGE_64K,
					   old_pte);
			return -1;
		}

		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
	}
	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}
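The same primary/secondary probing pattern appears in every insert path above: probe the primary group, fall back to the group addressed by the complemented hash, and if both are full evict a random entry and retry. A compact, self-contained sketch of just the group arithmetic (htab_hash_mask is passed in here to stand in for the kernel global):

#define HPTES_PER_GROUP	8	/* eight HPTEs per hash group */

/* First slot index of the primary group for a given hash. */
static unsigned long primary_group(unsigned long hash,
				   unsigned long htab_hash_mask)
{
	return ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
}

/* The secondary group is derived from the complemented hash. */
static unsigned long secondary_group(unsigned long hash,
				     unsigned long htab_hash_mask)
{
	return ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
}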
/*
 * A Linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = mmu_huge_psize;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		vsid = get_vsid(mm->context.id, addr);
		WARN_ON(vsid == 0);
	} else
		vsid = get_kernel_vsid(addr);
	vaddr = (vsid << 28) | (addr & 0x0fffffff);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case, might be worth testing the mm cpu mask though
	 * and decide to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}
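The batch->active flag consulted above is toggled around runs of PTE updates by the lazy-MMU hooks. A simplified sketch of that lifecycle as implied here (condensed from the arch_enter/leave_lazy_mmu_mode pattern of this era, not verbatim kernel code):

static inline void enter_lazy_mmu_sketch(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;	/* hpte_need_flush() will now batch */
}

static inline void leave_lazy_mmu_sketch(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);	/* drain leftovers */
	batch->active = 0;	/* back to immediate flushes */
}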