Example #1
static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
				       unsigned char *hpte_slot_array,
				       unsigned long addr, int psize)
{
	int ssize = 0, i, index = 0;
	unsigned long s_addr = addr;
	unsigned int max_hpte_count, valid;
	unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
	unsigned long shift, hidx, vpn = 0, vsid, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		if (!is_kernel_addr(addr)) {
			ssize = user_segment_size(addr);
			vsid = get_vsid(mm->context.id, addr, ssize);
			WARN_ON(vsid == 0);
		} else {
			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
			ssize = mmu_kernel_ssize;
		}

		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		slot_array[index] = slot;
		vpn_array[index] = vpn;
		if (index == PPC64_HUGE_HPTE_BATCH - 1) {
			/*
			 * Now do a bulk invalidate
			 */
			__pSeries_lpar_hugepage_invalidate(slot_array,
							   vpn_array,
							   PPC64_HUGE_HPTE_BATCH,
							   psize, ssize);
			index = 0;
		} else
			index++;
	}
	if (index)
		__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
						   index, psize, ssize);
}
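
A note on the decoding helpers consulted above. The hpte_slot_array packs one byte per sub-page of the huge page: bit 0 records whether an HPTE exists for that slot and the remaining bits carry the hash index. A minimal sketch of hpte_valid()/hpte_hash_index() under that assumed layout (the real definitions live in the powerpc hugepage code):

/* Sketch, assuming bit 0 = valid and bits 1..7 = hash index. */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	return hpte_slot_array[index] & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	return hpte_slot_array[index] >> 1;
}
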
Example #2
/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE.  If the change write-protects the page, it is
 * permissible to convert the existing HPTE to read-only rather
 * than removing it (if we remove it we should clear the
 * _PAGE_HPTEFLAGS bits).
 */
void hpte_update(struct mm_struct *mm, unsigned long addr,
                 pte_t *ptep, unsigned long pte, int huge)
{
    struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
    unsigned long vsid;
    unsigned int psize;
    int i;

    i = batch->index;

    /* We mask the address for the base page size. Huge pages will
     * have applied their own masking already
     */
    addr &= PAGE_MASK;

    /* Get page size (maybe move back to caller) */
    if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
        psize = mmu_huge_psize;
#else
        BUG();
        psize = pte_pagesize_index(pte); /* shutup gcc */
#endif
    } else
        psize = pte_pagesize_index(pte);

    /*
     * This can happen when we are in the middle of a TLB batch and
     * we encounter memory pressure (eg copy_page_range when it tries
     * to allocate a new pte). If we have to reclaim memory and end
     * up scanning and resetting referenced bits then our batch context
     * will change mid stream.
     *
     * We also need to ensure only one page size is present in a given
     * batch
     */
    if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
        flush_tlb_pending();
        i = 0;
    }
    if (i == 0) {
        batch->mm = mm;
        batch->psize = psize;
    }
    if (!is_kernel_addr(addr)) {
        vsid = get_vsid(mm->context.id, addr);
        WARN_ON(vsid == 0);
    } else
        vsid = get_kernel_vsid(addr);
    batch->vaddr[i] = (vsid << 28) | (addr & 0x0fffffff);
    batch->pte[i] = __real_pte(__pte(pte), ptep);
    batch->index = ++i;
    if (i >= PPC64_TLB_BATCH_NR)
        flush_tlb_pending();
}
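
hpte_update() only queues entries; the batch is drained by flush_tlb_pending(). A minimal sketch of that drain path, assuming the get_cpu_var()/put_cpu_var() per-CPU accessors of this era:

/* Sketch: drain the per-CPU batch if anything is queued. */
void flush_tlb_pending(void)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);

	put_cpu_var(ppc64_tlb_batch);
}
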
Example #3
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;

	/* We don't want huge pages prefaulted for now
	 */
	if (unlikely(in_hugepage_area(mm->context, ea)))
		return;

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx\n", mm, mm->pgd, ea, access, trap);

	/* Get PTE, VSID, access mask */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;
	vsid = get_vsid(mm->context.id, ea);

	/* Hash it in */
	local_irq_save(flags);
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;
#ifndef CONFIG_PPC_64K_PAGES
	__hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_ci_restrictions) {
		/* If this PTE is non-cacheable, switch to 4k */
		if (mm->context.user_psize == MMU_PAGE_64K &&
		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
			mm->context.user_psize = MMU_PAGE_4K;
			mm->context.sllp = SLB_VSID_USER |
				mmu_psize_defs[MMU_PAGE_4K].sllp;
			get_paca()->context = mm->context;
			slb_flush_and_rebolt();
		}
	}
	if (mm->context.user_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		__hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */
	local_irq_restore(flags);
}
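
For context, a hedged sketch of the usual caller: the update_mmu_cache() hook drives a preload like this after a fault has been resolved, so the next access to the page does not fault again. Condensed and illustrative; details vary by kernel version:

/* Illustrative sketch (not a verbatim quote) of the preload caller. */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte))
		return;

	/* Figure out whether this is an instruction or data access from
	 * the trap that got us here; init gets here at boot with no regs */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;

	hash_preload(vma->vm_mm, address, access, trap);
}
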
Example #4
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid;

	pr_debug("%s\n", __func__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}
	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
	if (in_hugepage_area(mm->context, ea))
		vsid |= SLB_VSID_L;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}
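
Once the new SLB entry is in place, spu_restart_dma() kicks the MFC so the faulted DMA retries. A rough sketch of that helper, assuming the Cell MFC control register interface (MFC_CNTL_RESTART_DMA_COMMAND):

/* Sketch: restart queued MFC DMA once the translation is fixed up,
 * unless a context switch is pending and will redo it anyway. */
void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}
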
Example #5
/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, unsigned long pte, int huge)
{
    unsigned long vpn;
    struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
    unsigned long vsid;
    unsigned int psize;
    int ssize;
    real_pte_t rpte;
    int i;

    i = batch->index;

    /* Get page size (maybe move back to caller).
     *
     * NOTE: when using special 64K mappings in a 4K environment (e.g.
     * for SPEs), we obtain the page size from the slice, which thus
     * must still exist (and thus the VMA not reused) at the time
     * of this call
     */
    if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
        psize = get_slice_psize(mm, addr);
        /* Mask the address for the correct page size */
        addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
        BUG();
        psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
    } else {
        psize = pte_pagesize_index(mm, addr, pte);
        /* Mask the address for the standard page size.  If we
         * have a 64k page kernel, but the hardware does not
         * support 64k pages, this might be different from the
         * hardware page size encoded in the slice table. */
        addr &= PAGE_MASK;
    }

    /* Build full vaddr */
    if (!is_kernel_addr(addr)) {
        ssize = user_segment_size(addr);
        vsid = get_vsid(mm->context.id, addr, ssize);
    } else {
        vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
        ssize = mmu_kernel_ssize;
    }
    WARN_ON(vsid == 0);
    vpn = hpt_vpn(addr, vsid, ssize);
    rpte = __real_pte(__pte(pte), ptep);

    /*
     * Check if we have an active batch on this CPU. If not, just
     * flush now and return. For now, we do global invalidates
     * in that case; it might be worth testing the mm cpu mask
     * though and deciding to use local invalidates instead...
     */
    if (!batch->active) {
        flush_hash_page(vpn, rpte, psize, ssize, 0);
        put_cpu_var(ppc64_tlb_batch);
        return;
    }

    /*
     * This can happen when we are in the middle of a TLB batch and
     * we encounter memory pressure (eg copy_page_range when it tries
     * to allocate a new pte). If we have to reclaim memory and end
     * up scanning and resetting referenced bits then our batch context
     * will change mid stream.
     *
     * We also need to ensure only one page size is present in a given
     * batch
     */
    if (i != 0 && (mm != batch->mm || batch->psize != psize ||
                   batch->ssize != ssize)) {
        __flush_tlb_pending(batch);
        i = 0;
    }
    if (i == 0) {
        batch->mm = mm;
        batch->psize = psize;
        batch->ssize = ssize;
    }
    batch->pte[i] = rpte;
    batch->vpn[i] = vpn;
    batch->index = ++i;
    if (i >= PPC64_TLB_BATCH_NR)
        __flush_tlb_pending(batch);
    put_cpu_var(ppc64_tlb_batch);
}
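
For completeness, a sketch of the __flush_tlb_pending() drain used above, modelled on the vpn/ssize-aware batch this example fills; it uses a local invalidate when only the current CPU has used the mm:

/* Sketch: flush everything queued in the batch. */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	int i = batch->index;
	int local = 0;

	if (cpumask_equal(mm_cpumask(batch->mm),
			  cpumask_of(smp_processor_id())))
		local = 1;

	if (i == 1)
		flush_hash_page(batch->vpn[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);

	batch->index = 0;
}
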
Example #6
/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;
	int psize;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
		return 1;
	}

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (!mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		vsid = get_vsid(mm->context.id, ea);
		psize = mm->context.user_psize;
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		break;
	default:
		/* Not a valid range.
		 * Send the problem up to do_page_fault.
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

	/* Handle hugepage regions */
	if (unlikely(in_hugepage_area(mm->context, ea))) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path)
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifndef CONFIG_PPC_64K_PAGES
	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_ci_restrictions) {
		/* If this PTE is non-cacheable, switch to 4k */
		if (psize == MMU_PAGE_64K &&
		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
			if (user_region) {
				psize = MMU_PAGE_4K;
				mm->context.user_psize = MMU_PAGE_4K;
				mm->context.sllp = SLB_VSID_USER |
					mmu_psize_defs[MMU_PAGE_4K].sllp;
			} else if (ea < VMALLOC_END) {
				/*
				 * some driver did a non-cacheable mapping
				 * in vmalloc space, so switch vmalloc
				 * to 4k pages
				 */
				printk(KERN_ALERT "Reducing vmalloc segment "
				       "to 4kB pages because of "
				       "non-cacheable mapping\n");
				psize = mmu_vmalloc_psize = MMU_PAGE_4K;
			}
		}
		if (user_region) {
			if (psize != get_paca()->context.user_psize) {
				get_paca()->context = mm->context;
				slb_flush_and_rebolt();
			}
		} else if (get_paca()->vmalloc_sllp !=
			   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
			get_paca()->vmalloc_sllp =
				mmu_psize_defs[mmu_vmalloc_psize].sllp;
			slb_flush_and_rebolt();
		}
	}
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
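
The result codes documented above are consumed by the low-level fault path; in the real kernel that caller is assembly in head_64.S, so the dispatch below is a hypothetical C rendering (do_hash_page_fault is an illustrative name, not a kernel symbol):

/* Hypothetical sketch of dispatching on hash_page()'s result codes. */
void do_hash_page_fault(struct pt_regs *regs, unsigned long ea,
			unsigned long access, unsigned long trap)
{
	int rc = hash_page(ea, access, trap);

	if (rc == 0)
		return;				  /* handled: HPTE inserted */
	if (rc == 1)
		do_page_fault(regs, ea, trap);	  /* normal Linux fault path */
	else
		bad_page_fault(regs, ea, SIGBUS); /* critical insertion error */
}
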
Example #7
/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment (e.g.
	 * for SPEs), we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = mmu_huge_psize;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		vsid = get_vsid(mm->context.id, addr);
		WARN_ON(vsid == 0);
	} else
		vsid = get_kernel_vsid(addr);
	vaddr = (vsid << 28) | (addr & 0x0fffffff);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case; it might be worth testing the mm cpu mask
	 * though and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}
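
The batch->active flag tested above is raised and dropped by the lazy-MMU hooks. A minimal sketch of those hooks, assuming this example's __get_cpu_var() batch accessor:

/* Sketch: batching is only "active" between the lazy-MMU enter and
 * leave hooks; leave drains whatever accumulated. */
static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}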