Example #1
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	unsigned long ap = mmu_get_ap(mmu_virtual_psize);
	unsigned long pid, end;
	int local;

	preempt_disable();
	/* mm_is_thread_local() is only stable with preemption disabled */
	local = mm_is_thread_local(mm);
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		preempt_enable();
		return;
	}

	/* Otherwise first do the PWC */
	if (local)
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
	else
		_tlbie_pid(pid, RIC_FLUSH_PWC);

	/* Then iterate the pages */
	end = addr + HPAGE_PMD_SIZE;
	for (; addr < end; addr += PAGE_SIZE) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}
no_context:
	preempt_enable();
}
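
The per-page loop cost is easy to sanity-check in isolation: with a 2MB PMD-mapped huge page and 64K base pages (the non-4K case that actually reaches the loop), collapsing one PMD costs a single PWC flush plus 32 per-page invalidations. A minimal user-space sketch of that arithmetic; the 2MB and 64K sizes are assumptions for illustration, not something the function fixes:

#include <stdio.h>

#define HPAGE_PMD_SIZE	(2UL << 20)	/* assumed: 2MB PMD-mapped huge page */
#define BASE_PAGE_SIZE	(64UL << 10)	/* assumed: 64K base pages */

int main(void)
{
	unsigned long addr = 0;
	unsigned long end = addr + HPAGE_PMD_SIZE;
	unsigned long flushes = 0;

	/* mirrors the per-page loop in Example #1 */
	for (; addr < end; addr += BASE_PAGE_SIZE)
		flushes++;
	printf("%lu per-page flushes after one PWC flush\n", flushes);	/* 32 */
	return 0;
}
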
Example #2
void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);
	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
	int local;

	preempt_disable();
	/* mm_is_thread_local() is only stable with preemption disabled */
	local = mm_is_thread_local(mm);
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto out;

	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
		/* range too large (or unbounded): one full-PID flush is cheaper */
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
		goto out;
	}
	for (addr = start; addr < end; addr += page_size) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}
out:
	preempt_enable();
}
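
The ceiling test is the heart of this variant: instead of always walking the range, it falls back to a single full-PID flush once the range covers more than tlb_single_page_flush_ceiling pages. A self-contained sketch of that decision, assuming a ceiling of 33 (the kernel value is a runtime tunable; 33 here is an assumption):

#include <stdbool.h>
#include <stdio.h>

#define TLB_FLUSH_ALL	(~0UL)

static unsigned long tlb_single_page_flush_ceiling = 33;	/* assumed default */

static bool use_full_pid_flush(unsigned long start, unsigned long end,
			       unsigned long page_size)
{
	/* same predicate as the if () in Example #2 */
	return end == TLB_FLUSH_ALL ||
	       (end - start) > tlb_single_page_flush_ceiling * page_size;
}

int main(void)
{
	unsigned long psz = 1UL << 16;	/* 64K pages */

	printf("%d\n", use_full_pid_flush(0, 33 * psz, psz));		/* 0: per-page loop */
	printf("%d\n", use_full_pid_flush(0, 34 * psz, psz));		/* 1: full-PID flush */
	printf("%d\n", use_full_pid_flush(0, TLB_FLUSH_ALL, psz));	/* 1: flush all */
	return 0;
}
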
Example #3
void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);	/* tlbiel: this CPU only */
	preempt_enable();
}
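
This local variant only ever targets the CPU it runs on, so it can issue tlbiel unconditionally and needs no thread-locality check; the variants below are its SMP-aware counterparts, which must choose between a local tlbiel and a broadcast tlbie.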
Example #4
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}
Example #5
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
	preempt_enable();
}
Example #6
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else {
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	}
bail:
	preempt_enable();
}
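
The extra wrinkle in Example #6 is the conditional lock: on hardware without MMU_FTR_LOCKLESS_TLBIE, concurrent broadcast tlbie instructions must be serialized through native_tlbie_lock, so the lock is taken only when the feature is absent. A self-contained pthreads sketch of the same take-the-lock-only-when-needed shape (all names here are illustrative stand-ins, not kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t tlbie_lock = PTHREAD_MUTEX_INITIALIZER;
static bool lockless_tlbie;	/* stand-in for mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE) */

static void broadcast_invalidate(unsigned long va)
{
	bool lock = !lockless_tlbie;

	/* serialize the broadcast only on hardware that requires it */
	if (lock)
		pthread_mutex_lock(&tlbie_lock);
	printf("tlbie %#lx\n", va);	/* stand-in for _tlbie_va() */
	if (lock)
		pthread_mutex_unlock(&tlbie_lock);
}

int main(void)
{
	broadcast_invalidate(0x1000);
	return 0;
}
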
Example #7
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			exit_flush_lazy_tlbs(mm);
			goto local;
		}
		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	} else {
local:
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	}
	preempt_enable();
}
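
The newest variant adds one optimization over Example #4: when the mm looks non-local only because other CPUs still reference it lazily, and the process is single-threaded, exit_flush_lazy_tlbs() evicts those lazy users so the cheap local tlbiel suffices in place of a broadcast tlbie. A sketch of that downgrade flow, with hypothetical stand-in predicates and printf in place of the flush primitives:

#include <stdbool.h>
#include <stdio.h>

static bool thread_local_mm;	/* stand-in for mm_is_thread_local() */
static bool singlethreaded_mm;	/* stand-in for mm_is_singlethreaded() */

static void flush_page(unsigned long va)
{
	if (!thread_local_mm) {
		if (singlethreaded_mm) {
			/* push lazy users off the mm, then local is enough */
			printf("exit_flush_lazy_tlbs()\n");
			goto local;
		}
		printf("tlbie %#lx (broadcast)\n", va);
		return;
	}
local:
	printf("tlbiel %#lx (local)\n", va);
}

int main(void)
{
	thread_local_mm = false;
	singlethreaded_mm = true;
	flush_page(0x2000);	/* downgraded to a local flush */
	return 0;
}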