/* * this function sends a 'reschedule' IPI to all other CPUs. * This is used when RT tasks are starving and other CPUs * might be able to run them. */ void smp_send_reschedule_allbutself(void) { int cpu = smp_processor_id(); int i; for (i = 0; i < NR_CPUS; i++) if (cpu_online(i) && i != cpu) core_send_ipi(i, SMP_RESCHEDULE_YOURSELF); }
/*
 * Run a function on all other CPUs.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or are or have executed.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                            Deadlock
 */
int smp_call_function (void (*func) (void *info), void *info, int retry,
								int wait)
{
	/* IPI payload lives on this stack frame; remote CPUs reach it
	 * through the global call_data pointer while we hold smp_call_lock. */
	struct call_data_struct data;
	int i, cpus = num_online_cpus() - 1;	/* peers expected to respond */
	int cpu = smp_processor_id();

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	BUG_ON(!cpu_online(cpu));

	if (!cpus)
		return 0;	/* no other CPUs online: nothing to do */

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	/* Make call_data and its contents visible to remote CPUs before
	 * any IPI can arrive. */
	smp_mb();

	/* Send a message to all other CPUs and wait for them to respond */
	for_each_online_cpu(i)
		if (i != cpu)
			core_send_ipi(i, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	/* Clear the pointer to the (about to be invalid) stack data
	 * before dropping the lock. */
	call_data = NULL;
	spin_unlock(&smp_call_lock);

	return 0;
}
/* * Run a function on all other CPUs. * <func> The function to run. This must be fast and non-blocking. * <info> An arbitrary pointer to pass to the function. * <retry> If true, keep retrying until ready. * <wait> If true, wait until function has completed on other CPUs. * [RETURNS] 0 on success, else a negative status code. * * Does not return until remote CPUs are nearly ready to execute <func> * or are or have executed. */ int smp_call_function (void (*func) (void *info), void *info, int retry, int wait) { struct call_data_struct data; int i, cpus = smp_num_cpus - 1; int cpu = smp_processor_id(); if (!cpus) return 0; data.func = func; data.info = info; atomic_set(&data.started, 0); data.wait = wait; if (wait) atomic_set(&data.finished, 0); spin_lock(&smp_call_lock); call_data = &data; /* Send a message to all other CPUs and wait for them to respond */ for (i = 0; i < smp_num_cpus; i++) if (i != cpu) core_send_ipi(i, SMP_CALL_FUNCTION); /* Wait for response */ /* FIXME: lock-up detection, backtrace on lock-up */ while (atomic_read(&data.started) != cpus) barrier(); if (wait) while (atomic_read(&data.finished) != cpus) barrier(); spin_unlock(&smp_call_lock); return 0; }
/*
 * Kick a single CPU with a 'reschedule' IPI.
 *
 * Deliberately lock-free: the IPI is fired straight through with no
 * serialization, so the worst case is simply a lost reschedule.
 */
void smp_send_reschedule(int cpu)
{
	core_send_ipi(cpu, SMP_RESCHEDULE_YOURSELF);
}
void nlm_send_ipi_mask(const struct cpumask * mask, unsigned int action) { int cpu; for_each_cpu(cpu, mask){ core_send_ipi(cpu, action); }
/*
 * Deliver the given IPI <action> to exactly one CPU.
 * Thin wrapper around core_send_ipi(); no locking, no waiting.
 */
void nlm_send_ipi_single(int cpu, unsigned int action)
{
	core_send_ipi(cpu, action);
}