/*
 * xen_send_IPI_mask - raise an IPI on every online CPU in @mask.
 * @mask:   candidate CPUs (passed by value, so we may clobber our copy)
 * @vector: IPI vector to deliver to each selected CPU
 *
 * Offline CPUs are silently filtered out before delivery.
 */
static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
{
	unsigned target;

	/* Only ever signal CPUs that are actually online. */
	cpus_and(mask, mask, cpu_online_map);

	for_each_cpu_mask_nr(target, mask)
		xen_send_IPI_one(target, vector);
}
/*
 * steal_context_smp - reclaim an MMU context id from another mm (SMP case).
 * @id: first context id to consider as a victim
 *
 * Scans the context ids starting at @id (wrapping from last_context back
 * to first_context) for an mm whose context is not active on any CPU;
 * that mm loses its context and the freed id is returned.  If every
 * context is active, the lock is dropped briefly to let other CPUs make
 * progress, then the whole scan restarts.
 *
 * NOTE(review): the spin_unlock/spin_lock pair below implies the caller
 * holds context_lock on entry and still holds it on return — confirm
 * against the call site.
 */
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max;

 again:
	/* Upper bound on candidates to try before backing off */
	max = last_context - first_context;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active, on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			/* Active: try the next id, wrapping around */
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_debug("[%d] steal context %d from mm @%p\n",
			 smp_processor_id(), id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm */
		for_each_cpu_mask_nr(cpu, mm->cpu_vm_mask)
			__set_bit(id, stale_map[cpu]);
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	spin_unlock(&context_lock);
	cpu_relax();
	spin_lock(&context_lock);

	/* Everything was active on this pass; rescan from scratch */
	goto again;
}
/**
 * __percpu_depopulate_mask - free per-cpu data for selected CPUs
 * @__pdata: per-cpu data to depopulate
 * @mask: bitmask selecting which CPUs' per-cpu areas to release
 *
 * Walks @mask and tears down the per-cpu area of @__pdata on each
 * CPU whose bit is set.
 */
static void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
{
	int unit;

	for_each_cpu_mask_nr(unit, *mask)
		percpu_depopulate(__pdata, unit);
}