/* Send IPI message <op> to every CPU set in <mask>. */
static void send_IPI_mask(cpumask_t mask, enum ipi_message_type op)
{
	int cpu;

	for_each_cpu_mask(cpu, mask)
		ipi_send(cpu, op);
}
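/*
 * Every snippet in this section iterates with for_each_cpu_mask(). In the
 * older kernel trees these examples come from, the macro was essentially a
 * for-loop over the set bits of a cpumask_t. A minimal sketch of its shape
 * (not the exact definition; see your tree's include/linux/cpumask.h):
 */
#define for_each_cpu_mask(cpu, mask)		\
	for ((cpu) = first_cpu(mask);		\
	     (cpu) < NR_CPUS;			\
	     (cpu) = next_cpu((cpu), (mask)))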
/* Send <vector> to every online CPU in <mask>. */
static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
{
	unsigned cpu;

	cpus_and(mask, mask, cpu_online_map);
	for_each_cpu_mask(cpu, mask)
		xen_send_IPI_one(cpu, vector);
}
static void boot_trace_init(struct trace_array *tr)
{
	int cpu;

	boot_trace = tr;
	trace_boot_enabled = 0;

	for_each_cpu_mask(cpu, cpu_possible_map)
		tracing_reset(tr, cpu);
}
int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
{
	cpumask_t allbutself;
	unsigned int i, nr_cpus;
	int ret;

	BUG_ON(!local_irq_is_enabled());

	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);
	nr_cpus = cpus_weight(allbutself);

	if ( nr_cpus == 0 )
	{
		BUG_ON(cpu != smp_processor_id());
		return (*fn)(data);
	}

	/*
	 * Note: we must not spin on the lock while another CPU holds it. The
	 * holder expects this CPU to enter softirq context; spinning here
	 * instead would deadlock.
	 */
	if ( !spin_trylock(&stopmachine_lock) )
		return -EBUSY;

	stopmachine_data.fn = fn;
	stopmachine_data.fn_data = data;
	stopmachine_data.nr_cpus = nr_cpus;
	stopmachine_data.fn_cpu = cpu;
	atomic_set(&stopmachine_data.done, 0);
	stopmachine_data.state = STOPMACHINE_START;

	smp_wmb();

	for_each_cpu_mask ( i, allbutself )
		cpu_raise_softirq(i, STOPMACHINE_SOFTIRQ);

	stopmachine_set_state(STOPMACHINE_PREPARE);

	local_irq_disable();
	stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);

	if ( cpu == smp_processor_id() )
		stopmachine_data.fn_result = (*fn)(data);
	stopmachine_set_state(STOPMACHINE_INVOKE);
	ret = stopmachine_data.fn_result;

	stopmachine_set_state(STOPMACHINE_EXIT);
	local_irq_enable();

	spin_unlock(&stopmachine_lock);

	return ret;
}
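/*
 * A hypothetical caller, for illustration only: run do_reconfigure() on the
 * invoking CPU while every other online CPU spins in softirq context with
 * IRQs disabled. The callback and wrapper names are assumptions, not part of
 * the code above.
 */
static int do_reconfigure(void *arg)
{
	/* Runs while the machine is stopped: keep it short, never sleep. */
	return 0;
}

static int reconfigure_machine(void)
{
	return stop_machine_run(do_reconfigure, NULL, smp_processor_id());
}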
static int kmem_trace_init(struct trace_array *tr)
{
	int cpu;

	kmemtrace_array = tr;

	for_each_cpu_mask(cpu, cpu_possible_map)
		tracing_reset(tr, cpu);

	kmemtrace_start_probes();

	return 0;
}
/**
 * x86_64 specific code for carrying out inter-CPU function calls.
 * This function should not be called directly; call xcall_function() instead.
 *
 * Arguments:
 *       [IN] cpu_mask: The target CPUs of the cross-call.
 *       [IN] func:     The function to execute on each target CPU.
 *       [IN] info:     Argument to pass to func().
 *       [IN] wait:     true = wait for cross-call to fully complete.
 *
 * Returns:
 *       Success: 0
 *       Failure: Error code
 */
int
arch_xcall_function(
	cpumask_t	cpu_mask,
	void		(*func)(void *info),
	void *		info,
	bool		wait
)
{
	struct xcall_data_struct data;
	unsigned int num_cpus;
	unsigned int cpu;

	BUG_ON(irqs_disabled());

	/* Count how many CPUs are being targeted */
	num_cpus = cpus_weight(cpu_mask);
	if (!num_cpus)
		return 0;

	/* Fill in the xcall data structure on our stack */
	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	if (wait)
		atomic_set(&data.finished, 0);
	data.wait = wait;

	/* Spin with IRQs enabled */
	while (!spin_trylock_irq(&xcall_data_lock))
		;
	/* IRQs are now disabled */

	/* Set the global xcall data pointer */
	xcall_data = &data;
	wmb();

	/* Send inter-processor interrupts to the target CPUs */
	for_each_cpu_mask(cpu, cpu_mask)
		lapic_send_ipi(cpu, XCALL_FUNCTION_VECTOR);

	/* Wait for initiation responses */
	while (atomic_read(&data.started) != num_cpus)
		cpu_relax();

	/* If requested, wait for completion responses */
	if (wait) {
		while (atomic_read(&data.finished) != num_cpus)
			cpu_relax();
	}

	spin_unlock_irq(&xcall_data_lock);

	return 0;
}
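/*
 * Illustrative use only: broadcast a call to every online CPU and wait for
 * completion. flush_tlb_ipi() and flush_all_tlbs() are hypothetical names;
 * as the comment block above notes, real callers should go through
 * xcall_function() rather than calling arch_xcall_function() directly.
 */
static void flush_tlb_ipi(void *info)
{
	/* Runs on each target CPU from the XCALL_FUNCTION_VECTOR handler. */
}

static void flush_all_tlbs(void)
{
	arch_xcall_function(cpu_online_map, flush_tlb_ipi, NULL, true);
}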
/*
 * Wake the CPUs in *mask that are MWAITing on their cpuidle_mwait_wakeup
 * flag, and strip the woken CPUs out of *mask for the caller.
 */
void cpuidle_wakeup_mwait(cpumask_t *mask)
{
	cpumask_t target;
	unsigned int cpu;

	cpus_and(target, *mask, cpuidle_mwait_flags);

	/* CPU is MWAITing on the cpuidle_mwait_wakeup flag. */
	for_each_cpu_mask(cpu, target)
		mwait_wakeup(cpu) = 0;

	cpus_andnot(*mask, *mask, target);
}
/*
 * Once an in-flight irq migration has left the old CPU domain, tell the old
 * domain's online CPUs (via IPI) to clean up their stale vector entries.
 */
void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	/* Still being delivered on the old domain; too early to clean up. */
	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
		return;

	cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
	for_each_cpu_mask(i, cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}
static void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	int cpu;
	cpumask_t cpumask;

	set_need_resched();
	if (unlikely(!rcp->signaled)) {
		rcp->signaled = 1;
		/*
		 * Don't send IPI to itself. With irqs disabled,
		 * rdp->cpu is the current cpu.
		 */
		cpumask = rcp->cpumask;
		cpu_clear(rdp->cpu, cpumask);
		for_each_cpu_mask(cpu, cpumask)
			smp_send_reschedule(cpu);
	}
}
/*
 * Release an irq's vector: invalidate the per-CPU vector_irq entries on the
 * domain's online CPUs, then mark the vector and the irq unused.
 */
static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpus_andnot(vector_table[vector], vector_table[vector], domain);
}
/* Called by boot processor to activate the rest. */
static void __init smp_init(void)
{
	unsigned int cpu;
	unsigned highest = 0;

	for_each_cpu_mask(cpu, cpu_possible_map)
		highest = cpu;
	nr_cpu_ids = highest + 1;

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
	smp_cpus_done(max_cpus);
}
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpus_or(vector_table[vector], vector_table[vector], domain);
	return 0;
}
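/*
 * __bind_irq_vector() and __clear_irq_vector() above assume the caller
 * serializes access to the vector tables. A sketch of a locked wrapper,
 * assuming the IA64 code's vector_lock spinlock (simplified for
 * illustration, not the tree's exact wrapper):
 */
static int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}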
/*
 * These functions send a 'generic call function' IPI to other online
 * CPUs in the system.
 *
 * [SUMMARY] Run a function on other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * <map> A cpumask of the CPUs to send the IPI to.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <func>, are executing it, or have
 * already executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
static int __smp_call_function_map(void (*func) (void *info), void *info,
				   int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int ret = -1, num_cpus;
	int cpu;
	u64 timeout;

	if (unlikely(smp_ops == NULL))
		return ret;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	/* remove 'self' from the map */
	if (cpu_isset(smp_processor_id(), map))
		cpu_clear(smp_processor_id(), map);

	/* sanity check the map, remove any non-online processors. */
	cpus_and(map, map, cpu_online_map);

	num_cpus = cpus_weight(map);
	if (!num_cpus)
		goto done;

	call_data = &data;
	smp_wmb();
	/* Send a message to all CPUs in the map */
	for_each_cpu_mask(cpu, map)
		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);

	timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;

	/* Wait for indication that they have received the message */
	while (atomic_read(&data.started) != num_cpus) {
		HMT_low();
		if (get_tb() >= timeout) {
			printk("smp_call_function on cpu %d: other cpus not "
			       "responding (%d)\n", smp_processor_id(),
			       atomic_read(&data.started));
			if (!ipi_fail_ok)
				debugger(NULL);
			goto out;
		}
	}

	/* optionally wait for the CPUs to complete */
	if (wait) {
		while (atomic_read(&data.finished) != num_cpus) {
			HMT_low();
			if (get_tb() >= timeout) {
				printk("smp_call_function on cpu %d: other "
				       "cpus not finishing (%d/%d)\n",
				       smp_processor_id(),
				       atomic_read(&data.finished),
				       atomic_read(&data.started));
				debugger(NULL);
				goto out;
			}
		}
	}

 done:
	ret = 0;

 out:
	call_data = NULL;
	HMT_medium();
	return ret;
}
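/*
 * Illustrative caller only: how an smp_call_function()-style wrapper might
 * drive __smp_call_function_map(). The wrapper name and the call_lock that
 * serializes use of the single global call_data slot are assumptions here,
 * not taken from the code above.
 */
static int example_call_function(void (*func)(void *), void *info, int wait)
{
	int ret;

	spin_lock(&call_lock);
	ret = __smp_call_function_map(func, info, 0, wait, cpu_online_map);
	spin_unlock(&call_lock);
	return ret;
}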
/**
 * percpu_depopulate_mask - depopulate per-cpu data for some CPUs
 * @__pdata: per-cpu data to depopulate
 * @mask: depopulate per-cpu data for CPUs selected through mask bits
 */
void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
{
	int cpu;

	for_each_cpu_mask(cpu, *mask)
		percpu_depopulate(__pdata, cpu);
}
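/*
 * Callers typically reach this through a thin wrapper macro; in kernels of
 * this vintage it was roughly the line below (a sketch; check your tree's
 * include/linux/percpu.h for the exact form):
 */
#define percpu_depopulate_mask(__pdata, mask) \
	__percpu_depopulate_mask((__pdata), &(mask))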