static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}
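The "Caller has disabled preemption" comment refers to how the cpumask argument is built: the caller snapshots the mm's CPU set with preemption off so the local CPU stays stable. A simplified sketch of such a caller, loosely modelled on the kernel's flush_tlb_mm() path (paraphrased, not the exact source; the local-flush handling and the name of the flush-all sentinel vary between kernel versions), could look like this:

void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();			/* keep smp_processor_id() stable */
	cpu_mask = mm->cpu_vm_mask;		/* CPUs that have used this mm */
	cpu_clear(smp_processor_id(), cpu_mask);/* the local CPU flushes itself below */

	if (current->active_mm == mm)
		local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}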
/*
 * smp_call_function - run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other
 *        CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute func or are or have executed.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 * Actually there are a few legal cases, like panic.
 */
int smp_call_function(void (*func)(void *info), void *info, int nonatomic,
		      int wait)
{
	cpumask_t thismask = CPU_MASK_NONE;

	cpus_or(thismask, cpu_online_map, thismask);
	smp_call_function_many(&thismask, func, info, nonatomic, wait);
	return 0;
}
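A minimal, hypothetical caller of the interface documented above might look as follows; count_hit(), remote_hits and poke_other_cpus() are made-up names, and the sketch simply assumes the four-argument signature shown here (the callback runs in IPI context, so it must be fast and non-blocking):

#include <linux/smp.h>
#include <asm/atomic.h>

static atomic_t remote_hits = ATOMIC_INIT(0);

/* Runs on every other online CPU, in interrupt context. */
static void count_hit(void *info)
{
	atomic_inc((atomic_t *)info);
}

static void poke_other_cpus(void)
{
	/* nonatomic is unused; wait=1 blocks until all remote CPUs have run count_hit(). */
	smp_call_function(count_hit, &remote_hits, 0, 1);
}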
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
			     unsigned long va)
{
	cpumask_t cpumask = *cpumaskp;

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

#ifdef CONFIG_HOTPLUG_CPU
	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (unlikely(cpus_empty(cpumask)))
		return;
#endif

	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * AK: x86-64 has a faster method that could be ported.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	cpus_or(flush_cpumask, cpumask, flush_cpumask);

	/*
	 * Make the above memory operations globally visible before
	 * sending the IPI.
	 */
	smp_mb();

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpus_empty(flush_cpumask))
		/* nothing. lockup detection does not belong here */
		cpu_relax();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;
	cpumask_t cpumask = *cpumaskp;

	if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
		return;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/*
	 * Make the above memory operations globally visible before
	 * sending the IPI.
	 */
	smp_mb();

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	cpumask_t tmp;

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	cpus_and(tmp, cpumask, cpu_online_map);
	BUG_ON(!cpus_equal(tmp, cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	if (!mm)
		BUG();

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	cpus_or(flush_cpumask, cpumask, flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpus_empty(flush_cpumask))
		/* nothing. lockup detection does not belong here */
		mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
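What makes the busy-wait above terminate is the receiving side: each targeted CPU's IPI handler flushes its own TLB and then clears its bit in flush_cpumask, so the sender spins until the mask drains to empty. A simplified, paraphrased sketch of that handler (modelled on the kernel's smp_invalidate_interrupt(); the mm/active_mm bookkeeping and the exact flush-all sentinel name are omitted here and vary by version) is:

asmlinkage void smp_invalidate_interrupt(void)
{
	unsigned long cpu = smp_processor_id();

	if (!cpu_isset(cpu, flush_cpumask))
		return;				/* stale IPI, nothing to do */

	if (flush_va == TLB_FLUSH_ALL)		/* sentinel: flush the whole address space */
		local_flush_tlb();
	else
		__flush_tlb_one(flush_va);	/* single-page invalidation */

	ack_APIC_irq();
	/* Clearing our bit is what releases the sender's while loop. */
	cpu_clear(cpu, flush_cpumask);
}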
void domain_update_node_affinity(struct domain *d)
{
    cpumask_t cpumask = CPU_MASK_NONE;
    nodemask_t nodemask = NODE_MASK_NONE;
    struct vcpu *v;
    unsigned int node;

    spin_lock(&d->node_affinity_lock);

    for_each_vcpu ( d, v )
        cpus_or(cpumask, cpumask, v->cpu_affinity);

    for_each_online_node ( node )
        if ( cpus_intersects(node_to_cpumask(node), cpumask) )
            node_set(node, nodemask);

    d->node_affinity = nodemask;
    spin_unlock(&d->node_affinity_lock);
}
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpus_or(vector_table[vector], vector_table[vector], domain);
	return 0;
}
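The leading double underscore marks this as the lock-free inner helper; the public entry point in the ia64 tree is a thin wrapper that takes the vector lock around it. Roughly, as a reconstructed sketch rather than a verbatim quote (the lock name vector_lock is assumed):

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}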