void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}
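All of the flush_tlb_pending() variants collected here share the same shape: take the per-CPU batch, return early if no addresses are queued, otherwise flush the queued virtual addresses and reset the counter, then release the per-CPU reference. The sketch below is a minimal userspace analogue of that control flow, not kernel code: addr_batch, flush_pending() and the printf() stand-in for the real TLB invalidation are all hypothetical names used only for illustration.

/* Minimal userspace analogue of flush_tlb_pending(); all names are
 * hypothetical and printf() stands in for the real TLB invalidation. */
#include <stdio.h>

#define BATCH_NR 8

struct addr_batch {
	unsigned long vaddrs[BATCH_NR];
	unsigned long nr;		/* plays the role of tb->tlb_nr */
};

static void flush_pending(struct addr_batch *b)
{
	unsigned long i;

	if (!b->nr)			/* nothing queued: early exit */
		return;

	for (i = 0; i < b->nr; i++)
		printf("invalidate %#lx\n", b->vaddrs[i]);

	b->nr = 0;			/* batch is empty again */
}

int main(void)
{
	struct addr_batch b = { .nr = 0 };

	b.vaddrs[b.nr++] = 0x400000UL;
	b.vaddrs[b.nr++] = 0x401000UL;
	flush_pending(&b);		/* flushes both queued addresses */
	flush_pending(&b);		/* no-op: nr is already 0 */
	return 0;
}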
Example #2
void tlb_flush(struct mmu_gather *tlb)
{
    struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

    /* If there's a TLB batch pending, then we must flush it because the
     * pages are going to be freed and we really don't want to have a CPU
     * access a freed page because it has a stale TLB entry.
     */
    if (tlbbatch->index)
        __flush_tlb_pending(tlbbatch);

    put_cpu_var(ppc64_tlb_batch);
}
Example #3
void flush_tlb_pending(void)
{
    struct mmu_gather *mp = &get_cpu_var(mmu_gathers);

    if (mp->tlb_nr) {
        flush_tsb_user(mp);

        if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
            smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
                                  &mp->vaddrs[0]);
#else
            __flush_tlb_pending(CTX_HWBITS(mp->mm->context),
                                mp->tlb_nr, &mp->vaddrs[0]);
#endif
        }
        mp->tlb_nr = 0;
    }

    put_cpu_var(mmu_gathers);
}
Example #4
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);

	if (tb->tlb_nr) {
		flush_tsb_user(tb);

		if (CTX_VALID(tb->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
		tb->tlb_nr = 0;
	}

	put_cpu_var(tlb_batch);
}
Example #5
/*
 * A Linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, unsigned long pte, int huge)
{
    unsigned long vpn;
    struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
    unsigned long vsid;
    unsigned int psize;
    int ssize;
    real_pte_t rpte;
    int i;

    i = batch->index;

    /* Get page size (maybe move back to caller).
     *
     * NOTE: when using special 64K mappings in a 4K environment, like
     * for SPEs, we obtain the page size from the slice, which thus
     * must still exist (and thus the VMA not reused) at the time
     * of this call
     */
    if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
        psize = get_slice_psize(mm, addr);
        /* Mask the address for the correct page size */
        addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
        BUG();
        psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
    } else {
        psize = pte_pagesize_index(mm, addr, pte);
        /* Mask the address for the standard page size.  If we
         * have a 64k page kernel, but the hardware does not
         * support 64k pages, this might be different from the
         * hardware page size encoded in the slice table. */
        addr &= PAGE_MASK;
    }

    /* Build full vaddr */
    if (!is_kernel_addr(addr)) {
        ssize = user_segment_size(addr);
        vsid = get_vsid(mm->context.id, addr, ssize);
    } else {
        vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
        ssize = mmu_kernel_ssize;
    }
    WARN_ON(vsid == 0);
    vpn = hpt_vpn(addr, vsid, ssize);
    rpte = __real_pte(__pte(pte), ptep);

    /*
     * Check if we have an active batch on this CPU. If not, just
     * flush now and return. For now, we do global invalidates
     * in that case; it might be worth testing the mm cpu mask
     * and deciding to use local invalidates instead...
     */
    if (!batch->active) {
        flush_hash_page(vpn, rpte, psize, ssize, 0);
        put_cpu_var(ppc64_tlb_batch);
        return;
    }

    /*
     * This can happen when we are in the middle of a TLB batch and
     * we encounter memory pressure (e.g. copy_page_range when it tries
     * to allocate a new pte). If we have to reclaim memory and end
     * up scanning and resetting referenced bits then our batch context
     * will change mid-stream.
     *
     * We also need to ensure only one page size is present in a given
     * batch.
     */
    if (i != 0 && (mm != batch->mm || batch->psize != psize ||
                   batch->ssize != ssize)) {
        __flush_tlb_pending(batch);
        i = 0;
    }
    if (i == 0) {
        batch->mm = mm;
        batch->psize = psize;
        batch->ssize = ssize;
    }
    batch->pte[i] = rpte;
    batch->vpn[i] = vpn;
    batch->index = ++i;
    if (i >= PPC64_TLB_BATCH_NR)
        __flush_tlb_pending(batch);
    put_cpu_var(ppc64_tlb_batch);
}
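Beyond flushing when the batch fills up (PPC64_TLB_BATCH_NR entries), hpte_need_flush() also forces an early flush whenever the new entry does not match the mm, page size, or segment size already queued, so a batch never mixes contexts or page sizes. The following is a small userspace sketch of just that decision, assuming hypothetical names (entry_batch, add_entry, do_flush) in place of the kernel structures.

/* Userspace sketch of the "flush when full or when the context changes"
 * logic in hpte_need_flush(); every name here is hypothetical. */
#include <stdio.h>

#define BATCH_NR 4

struct entry_batch {
	int mm_id;			/* stands in for batch->mm */
	int psize;			/* stands in for batch->psize */
	unsigned long vpn[BATCH_NR];
	int index;
};

static void do_flush(struct entry_batch *b)
{
	printf("flush %d entries (mm %d, psize %d)\n",
	       b->index, b->mm_id, b->psize);
	b->index = 0;
}

static void add_entry(struct entry_batch *b, int mm_id, int psize,
		      unsigned long vpn)
{
	int i = b->index;

	/* A batch may only hold one mm and one page size: if the new
	 * entry does not match, flush what has been queued so far. */
	if (i != 0 && (b->mm_id != mm_id || b->psize != psize)) {
		do_flush(b);
		i = 0;
	}
	if (i == 0) {			/* (re)start the batch */
		b->mm_id = mm_id;
		b->psize = psize;
	}
	b->vpn[i] = vpn;
	b->index = ++i;
	if (i >= BATCH_NR)		/* batch full: flush now */
		do_flush(b);
}

int main(void)
{
	struct entry_batch b = { .index = 0 };

	add_entry(&b, 1, 0, 0x10UL);
	add_entry(&b, 1, 0, 0x11UL);
	add_entry(&b, 2, 0, 0x20UL);	/* mm changed: flushes the first two */
	if (b.index)			/* drain whatever is still queued */
		do_flush(&b);
	return 0;
}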
Example #6
/*
 * A Linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment, like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = mmu_huge_psize;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		vsid = get_vsid(mm->context.id, addr);
		WARN_ON(vsid == 0);
	} else
		vsid = get_kernel_vsid(addr);
	vaddr = (vsid << 28) | (addr & 0x0fffffff);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case; it might be worth testing the mm cpu mask
	 * and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (e.g. copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid-stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch.
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}