/*
 * Cortex-A15 erratum 798181: on r0p0..r3p2, inner-shareable TLBI/DSB
 * operations may not adequately shoot down stale TLB entries in use on
 * other CPUs. The workaround broadcasts a dummy inner-shareable TLB
 * operation, via IPI where necessary. Two alternative implementations
 * of each broadcast helper appear below; only one of each pair would be
 * compiled into a given kernel.
 *
 * Open-coded variant of broadcast_tlb_mm_a15_erratum(): performs the
 * erratum check at run time and builds the IPI mask by comparing each
 * CPU's per-CPU current_mm pointer against the mm being invalidated.
 */
static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
	int cpu;
	cpumask_t mask = { CPU_BITS_NONE };

	if (!erratum_a15_798181())
		return;

	preempt_disable();
	dummy_flush_tlb_a15_erratum();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are running
		 * the same mm (and ASID) as the one being invalidated. There
		 * is no need for locking around the current_mm check since
		 * the switch_mm() function has a dmb() for this erratum in
		 * case a context switch happens on another CPU after the
		 * condition below.
		 */
		if (mm == per_cpu(current_mm, cpu))
			cpumask_set_cpu(cpu, &mask);
	}
	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
	preempt_enable_no_resched();
}
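
/*
 * For context, a minimal sketch of the two helpers the functions here
 * rely on. The names are taken from the call sites; the bodies assume
 * the documented workaround (a dummy TLBIMVAIS to address 0/ASID 0
 * followed by a DSB) and may differ in detail from a given kernel's
 * implementation.
 */
static void dummy_flush_tlb_a15_erratum(void)
{
	/* Dummy TLBIMVAIS (invalidate by MVA, inner shareable). */
	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
	dsb();
}

static void ipi_flush_tlb_a15_erratum(void *arg)
{
	/* Repeat the dummy flush on the CPU that received the IPI. */
	dummy_flush_tlb_a15_erratum();
}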

/*
 * Alternative broadcast_tlb_a15_erratum(): gated on the cached
 * has_erratum_a15_798181 flag rather than a per-call check.
 * smp_call_function() targets all other online CPUs and disables
 * preemption internally, so no explicit preemption handling is needed.
 */
static void broadcast_tlb_a15_erratum(void)
{
	if (!has_erratum_a15_798181)
		return;

	dummy_flush_tlb_a15_erratum();
	smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
}
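
/*
 * Illustrative callers, sketched under the assumption that this code
 * sits in the SMP TLB maintenance path (as in arch/arm/kernel/smp_tlb.c):
 * the normal flush runs first and the erratum broadcast follows it.
 * Exact caller bodies vary between kernel versions.
 */
void flush_tlb_all(void)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	else
		local_flush_tlb_all();
	broadcast_tlb_a15_erratum();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
	else
		local_flush_tlb_mm(mm);
	broadcast_tlb_mm_a15_erratum(mm);
}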

/*
 * Alternative broadcast_tlb_a15_erratum(): per-call erratum check, with
 * explicit preempt_disable() since smp_call_function_many() must run
 * with preemption off. cpu_online_mask includes the calling CPU, but
 * smp_call_function_many() skips it, hence the direct local dummy flush.
 */
static void broadcast_tlb_a15_erratum(void)
{
	if (!erratum_a15_798181())
		return;

	preempt_disable();
	dummy_flush_tlb_a15_erratum();
	smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
			       NULL, 1);
	preempt_enable_no_resched();
}
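
/*
 * The per-call check used by two of the variants above, sketched from
 * the published erratum: Cortex-A15 revisions r0p0 through r3p2 are
 * affected, so compare the MIDR (via the standard read_cpuid_id()
 * helper) against the Cortex-A15 part number and cap the revision.
 */
static inline int erratum_a15_798181(void)
{
	unsigned int midr = read_cpuid_id();

	/* Cortex-A15 r0p0..r3p2 affected */
	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
		return 0;
	return 1;
}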

/*
 * Alternative broadcast_tlb_mm_a15_erratum(): pins the CPU with
 * get_cpu()/put_cpu() and defers the "which CPUs are running this mm's
 * ASID" decision to the a15_erratum_get_cpumask() helper.
 */
static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
	int this_cpu;
	cpumask_t mask = { CPU_BITS_NONE };

	if (!has_erratum_a15_798181)
		return;

	dummy_flush_tlb_a15_erratum();
	this_cpu = get_cpu();
	a15_erratum_get_cpumask(this_cpu, mm, &mask);
	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
	put_cpu();
}
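
/*
 * Sketch of a15_erratum_get_cpumask(). Only the signature is implied by
 * the call site above; the body assumes per-CPU active-ASID tracking
 * (active_asids, cpu_asid_lock) as in the ARM ASID allocator, and is an
 * illustration rather than the verbatim helper.
 */
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = atomic64_read(&mm->context.id);
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * Only CPUs currently running the ASID being invalidated
		 * need the dummy-flush IPI.
		 */
		if (atomic64_read(&per_cpu(active_asids, cpu)) == context_id)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}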