static inline void
send_IPI_all (int op)
{
	int i;

	for (i = 0; i < smp_num_cpus; i++)
		send_IPI_single(i, op);
}

static inline void send_IPI_mask(const struct cpumask *mask, int op)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		send_IPI_single(cpu, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void send_IPI_mask(cpumask_t mask, int op)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask) {
		send_IPI_single(cpu, op);
	}
}

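/*
 * Hedged usage sketch (not from the original sources): how a caller
 * might drive the pointer-based send_IPI_mask() variant above to kick
 * every other online CPU. IPI_RESCHEDULE is borrowed from the
 * reschedule helper further down; get_cpu()/put_cpu() keep preemption
 * disabled around the CPU-id use, and the on-stack cpumask is
 * acceptable for a sketch even though real kernel code would avoid it
 * for large NR_CPUS.
 */
static void example_kick_other_cpus(void)
{
	struct cpumask others;
	int cpu = get_cpu();		/* disable preemption */

	cpumask_copy(&others, cpu_online_mask);
	cpumask_clear_cpu(cpu, &others);
	send_IPI_mask(&others, IPI_RESCHEDULE);
	put_cpu();			/* re-enable preemption */
}
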
static inline void send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	for (i = 0; i < parisc_max_cpus; i++) {
		if (cpu_online(i) && i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

static inline void
send_IPI_allbutself (int op)
{
	unsigned int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

static inline void send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

static inline void
send_IPI_allbutself (int op)
{
	int i;

	for (i = 0; i < smp_num_cpus; i++) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

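/*
 * Hedged sketch: the classic consumer of send_IPI_allbutself() is an
 * smp_send_stop()-style path that halts every other CPU, e.g. on
 * panic. IPI_CPU_STOP is an assumed message name here, not taken from
 * the snippets above.
 */
void example_smp_send_stop(void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}
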
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	/* Wait 5s total for all CPUs for them to come online */
	static int timeout;

	for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) {
		if (timeout >= 50000) {
			pr_info("skipping unresponsive cpu%d\n", cpu);
			local_irq_enable();
			return -EIO;
		}
		udelay(100);
	}

	local_irq_enable();
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Unleash the CPU! */
	send_IPI_single(cpu, MSG_TAG_START_CPU);
	while (!cpumask_test_cpu(cpu, cpu_online_mask))
		cpu_relax();
	return 0;
}

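/*
 * Hedged sketch of the secondary CPU's half of the __cpu_up()
 * handshake above; the names and ordering are assumptions, not code
 * from the original source. The secondary marks itself in cpu_started
 * (releasing __cpu_up()'s first wait loop), is then "unleashed" by the
 * MSG_TAG_START_CPU IPI, and finally sets itself online so the second
 * wait loop terminates.
 */
static void example_secondary_start(void)
{
	int cpu = smp_processor_id();

	cpumask_set_cpu(cpu, &cpu_started);	/* wakes __cpu_up() */
	/* ... wait for MSG_TAG_START_CPU, finish per-CPU init ... */
	set_cpu_online(cpu, true);		/* lets __cpu_up() return */
}
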
int
smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
			  int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = 1;

	if (cpuid == smp_processor_id()) {
		printk("%s: trying to call self\n", __FUNCTION__);
		return -EBUSY;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_single(cpuid, IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	call_data = NULL;

	spin_unlock_bh(&call_lock);
	return 0;
}

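/*
 * Hedged usage sketch for the five-argument smp_call_function_single()
 * above: run a hypothetical handler on one remote CPU and spin until
 * it completes. remote_drain() is an illustrative name, not part of
 * the original code.
 */
static void remote_drain(void *info)
{
	/* executes on the target CPU, in the IPI handler's context */
	(void)info;
}

static void example_call_remote(int target_cpu)
{
	/* nonatomic = 0, wait = 1: block until remote_drain() finishes */
	smp_call_function_single(target_cpu, remote_drain, NULL, 0, 1);
}
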
void smp_send_reschedule(int cpu)
{
	send_IPI_single(cpu, IPI_RESCHEDULE);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
}

static inline void
send_IPI_self (int op)
{
	send_IPI_single(smp_processor_id(), op);
}

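/*
 * Hedged sketch: send_IPI_self() gives a CPU a way to exercise its own
 * interrupt-delivery path, e.g. in a boot-time self-test. Reusing
 * IPI_RESCHEDULE here is purely illustrative.
 */
static void example_ipi_selftest(void)
{
	send_IPI_self(IPI_RESCHEDULE);
}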