Example #1
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(mmu_virtual_psize);
	unsigned long pid, end;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		preempt_enable();
		return;
	}

	/* Otherwise first do the PWC */
	if (local)
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
	else
		_tlbie_pid(pid, RIC_FLUSH_PWC);

	/* Then iterate the pages */
	end = addr + HPAGE_PMD_SIZE;
	for (; addr < end; addr += PAGE_SIZE) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}
no_context:
	preempt_enable();
}
Example #2
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	/*
	 * Order loads of mm_cpumask vs previous stores to clear ptes before
	 * the invalidate. See barrier in switch_mm_irqs_off
	 */
	smp_mb();
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			exit_flush_lazy_tlbs(mm);
			goto local;
		}

		if (mm_needs_flush_escalation(mm))
			_tlbie_pid(pid, RIC_FLUSH_ALL);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
	} else {
local:
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	}
	preempt_enable();
}
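The interesting part of Example #2 is the choice between a CPU-local flush (_tlbiel_pid) and a broadcast flush (_tlbie_pid), with two refinements: an mm that has dropped back to a single thread gets its lazy-TLB users kicked off and is then flushed locally, and some workloads escalate the broadcast to RIC_FLUSH_ALL. The stand-alone sketch below re-expresses just that decision tree; every identifier in it is a stub standing in for the kernel helper it is named after, a sketch of the decision structure rather than the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

enum ric { RIC_FLUSH_TLB, RIC_FLUSH_PWC, RIC_FLUSH_ALL };

static bool thread_local_mm;     /* stands in for mm_is_thread_local(mm)        */
static bool single_threaded_mm;  /* stands in for mm_is_singlethreaded(mm)      */
static bool needs_escalation;    /* stands in for mm_needs_flush_escalation(mm) */

static void tlbiel_pid(unsigned long pid, enum ric ric)
{
	printf("tlbiel (local)     pid=%lu ric=%d\n", pid, ric);
}

static void tlbie_pid(unsigned long pid, enum ric ric)
{
	printf("tlbie  (broadcast) pid=%lu ric=%d\n", pid, ric);
}

static void flush_tlb_mm(unsigned long pid)
{
	if (!thread_local_mm) {
		if (single_threaded_mm) {
			/* lazy-TLB users are kicked off the mm first, then flush locally */
			tlbiel_pid(pid, RIC_FLUSH_TLB);
			return;
		}
		/* broadcast; escalate to a full flush when the workload requires it */
		tlbie_pid(pid, needs_escalation ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
	} else {
		tlbiel_pid(pid, RIC_FLUSH_TLB);
	}
}

int main(void)
{
	thread_local_mm = false;
	single_threaded_mm = false;
	needs_escalation = true;
	flush_tlb_mm(42);	/* prints the broadcast RIC_FLUSH_ALL case */
	return 0;
}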
Example #3
void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned long addr;
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(psize);
	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto err_out;

	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
		goto err_out;
	}
	for (addr = start; addr < end; addr += page_size) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}
err_out:
	preempt_enable();
}
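The full-versus-per-page decision in Example #3 hinges on tlb_single_page_flush_ceiling: once a range covers more pages than the ceiling, flushing the whole PID is assumed to be cheaper than issuing one tlbie/tlbiel per page. Below is a minimal user-space sketch of that heuristic, with plain variables standing in for the kernel tunables and ~0UL playing the role of TLB_FLUSH_ALL.

#include <stdbool.h>
#include <stdio.h>

static unsigned long tlb_single_page_flush_ceiling = 33;

static bool use_full_pid_flush(unsigned long start, unsigned long end,
			       unsigned long page_size)
{
	/* ~0UL plays the role of TLB_FLUSH_ALL here */
	if (end == ~0UL)
		return true;
	return (end - start) > tlb_single_page_flush_ceiling * page_size;
}

int main(void)
{
	unsigned long page_size = 1UL << 16;	/* 64K pages */

	/* 16 pages: below the ceiling, flush page by page (prints 0) */
	printf("%d\n", use_full_pid_flush(0x100000, 0x100000 + 16 * page_size, page_size));
	/* 64 pages: above the ceiling, flush the whole PID instead (prints 1) */
	printf("%d\n", use_full_pid_flush(0x100000, 0x100000 + 64 * page_size, page_size));
	return 0;
}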
Example #4
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
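The RIC_FLUSH_* arguments threaded through all of these examples select what a single tlbie/tlbiel invalidates: TLB entries, the page-walk cache (PWC), or both. The small program below is a reader's note following the kernel's usual encoding of the instruction's RIC field, not a copy of the kernel header.

#include <stdio.h>

/* RIC ("radix invalidation control") values as used throughout the examples. */
enum ric_flush {
	RIC_FLUSH_TLB = 0,	/* invalidate TLB entries only             */
	RIC_FLUSH_PWC = 1,	/* invalidate the page-walk cache only     */
	RIC_FLUSH_ALL = 2,	/* invalidate both TLB entries and the PWC */
};

static const char *ric_name(enum ric_flush ric)
{
	switch (ric) {
	case RIC_FLUSH_TLB: return "TLB only";
	case RIC_FLUSH_PWC: return "page-walk cache only";
	case RIC_FLUSH_ALL: return "TLB + page-walk cache";
	}
	return "unknown";
}

int main(void)
{
	/* Example #4 above flushes everything for the PID on the local CPU. */
	printf("radix__local_flush_tlb_mm uses RIC %d (%s)\n",
	       RIC_FLUSH_ALL, ric_name(RIC_FLUSH_ALL));
	return 0;
}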
Example #5
void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	preempt_disable();

	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_PWC);

	preempt_enable();
}
Example #6
void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
Example #7
static void do_exit_flush_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;
	unsigned long pid = mm->context.id;

	if (current->mm == mm)
		return; /* Local CPU */

	if (current->active_mm == mm) {
		/*
		 * Must be a kernel thread because sender is single-threaded.
		 */
		BUG_ON(current->mm);
		mmgrab(&init_mm);
		switch_mm(mm, &init_mm, current);
		current->active_mm = &init_mm;
		mmdrop(mm);
	}
	_tlbiel_pid(pid, RIC_FLUSH_ALL);
}
Example #8
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_pid(pid, RIC_FLUSH_ALL);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
	preempt_enable();
}
Example #9
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
	unsigned int pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/*
	 * If this context hasn't run on that CPU before and KVM is
	 * around, there's a slim chance that the guest on another
	 * CPU just brought in obsolete translation into the TLB of
	 * this CPU due to a bad prefetch using the guest PID on
	 * the way into the hypervisor.
	 *
	 * We work around this here. If KVM is possible, we check if
	 * any sibling thread is in KVM. If it is, the window may exist
	 * and thus we flush that PID from the core.
	 *
	 * A potential future improvement would be to mark which PIDs
	 * have never been used on the system and avoid it if the PID
	 * is new and the process has no other cpumask bit set.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
		int cpu = smp_processor_id();
		int sib = cpu_first_thread_sibling(cpu);
		bool flush = false;

		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
			if (sib == cpu)
				continue;
			if (paca[sib].kvm_hstate.kvm_vcpu)
				flush = true;
		}
		if (flush)
			_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
}
Example #10
static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			if (!fullmm) {
				exit_flush_lazy_tlbs(mm);
				goto local;
			}
		}
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	} else {
local:
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
	preempt_enable();
}
Example #11
static inline void __radix__flush_tlb_range(struct mm_struct *mm,
					unsigned long start, unsigned long end,
					bool flush_all_sizes)
{
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			if (end != TLB_FLUSH_ALL) {
				exit_flush_lazy_tlbs(mm);
				goto is_local;
			}
		}
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	} else {
is_local:
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	}

	if (full) {
		if (local) {
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		} else {
			if (mm_needs_flush_escalation(mm))
				_tlbie_pid(pid, RIC_FLUSH_ALL);
			else
				_tlbie_pid(pid, RIC_FLUSH_TLB);
		}
	} else {
		bool hflush = flush_all_sizes;
		bool gflush = flush_all_sizes;
		unsigned long hstart, hend;
		unsigned long gstart, gend;

		if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
			hflush = true;

		if (hflush) {
			hstart = (start + PMD_SIZE - 1) & PMD_MASK;
			hend = end & PMD_MASK;
			if (hstart == hend)
				hflush = false;
		}

		if (gflush) {
			gstart = (start + PUD_SIZE - 1) & PUD_MASK;
			gend = end & PUD_MASK;
			if (gstart == gend)
				gflush = false;
		}

		asm volatile("ptesync": : :"memory");
		if (local) {
			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbiel_va_range(hstart, hend, pid,
						PMD_SIZE, MMU_PAGE_2M);
			if (gflush)
				__tlbiel_va_range(gstart, gend, pid,
						PUD_SIZE, MMU_PAGE_1G);
			asm volatile("ptesync": : :"memory");
		} else {
			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbie_va_range(hstart, hend, pid,
						PMD_SIZE, MMU_PAGE_2M);
			if (gflush)
				__tlbie_va_range(gstart, gend, pid,
						PUD_SIZE, MMU_PAGE_1G);
			fixup_tlbie();
			asm volatile("eieio; tlbsync; ptesync": : :"memory");
		}
	}
	preempt_enable();
}
Example #12
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm)) {
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	} else {
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	}

	if (full) {
		if (local) {
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		} else {
			if (mm_needs_flush_escalation(mm))
				_tlbie_pid(pid, RIC_FLUSH_ALL);
			else
				_tlbie_pid(pid, RIC_FLUSH_TLB);
		}
	} else {
		bool hflush = false;
		unsigned long hstart, hend;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
		hend = end >> HPAGE_PMD_SHIFT;
		if (hstart < hend) {
			hstart <<= HPAGE_PMD_SHIFT;
			hend <<= HPAGE_PMD_SHIFT;
			hflush = true;
		}
#endif

		asm volatile("ptesync": : :"memory");
		if (local) {
			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbiel_va_range(hstart, hend, pid,
						HPAGE_PMD_SIZE, MMU_PAGE_2M);
			asm volatile("ptesync": : :"memory");
		} else {
			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbie_va_range(hstart, hend, pid,
						HPAGE_PMD_SIZE, MMU_PAGE_2M);
			fixup_tlbie();
			asm volatile("eieio; tlbsync; ptesync": : :"memory");
		}
	}
	preempt_enable();
}
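Examples #11 and #12 carve the 2M-aligned (and, in #11, 1G-aligned) portion out of the byte range so it can be flushed with huge-page-sized tlbie/tlbiel operations, falling back to base-page flushes for the unaligned ends. The stand-alone sketch below shows the boundary arithmetic for the 2M case, in both the mask form of Example #11 and the shift form of Example #12; the two produce identical boundaries. The constants are local definitions for illustration, not the kernel's.

#include <stdio.h>

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	unsigned long start = 0x10000000UL + 0x3000;	/* not 2M aligned   */
	unsigned long end   = 0x10000000UL + 0x900000;	/* 9M further along */

	/* Mask form (Example #11): round start up and end down to a PMD boundary. */
	unsigned long hstart = (start + PMD_SIZE - 1) & PMD_MASK;
	unsigned long hend   = end & PMD_MASK;

	/* Shift form (Example #12): the same rounding expressed with shifts. */
	unsigned long hstart2 = ((start + PMD_SIZE - 1) >> PMD_SHIFT) << PMD_SHIFT;
	unsigned long hend2   = (end >> PMD_SHIFT) << PMD_SHIFT;

	printf("2M-aligned sub-range: [%#lx, %#lx)\n", hstart, hend);
	printf("forms agree: %d, worth flushing as 2M: %d\n",
	       hstart == hstart2 && hend == hend2, hstart < hend);
	return 0;
}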