/**
 * smp_cache_call - Issue an IPI to request the other CPUs flush caches
 * @opr_mask: Cache operation flags
 * @start: Start address of request
 * @end: End address of request
 *
 * Send cache flush IPI to other CPUs. This invokes smp_cache_interrupt()
 * above on those other CPUs and then waits for them to finish.
 *
 * The caller must hold smp_cache_lock.
 */
void smp_cache_call(unsigned long opr_mask, unsigned long start, unsigned long end)
{
	/* Publish the request parameters for the IPI handlers to read
	 * (safe without barriers here only because the caller holds
	 * smp_cache_lock for the whole exchange). */
	smp_cache_mask = opr_mask;
	smp_cache_start = start;
	smp_cache_end = end;

	/* Target every online CPU except ourselves; each responder clears
	 * its own bit in smp_cache_ipi_map when it has finished. */
	cpumask_copy(&smp_cache_ipi_map, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map);

	send_IPI_allbutself(FLUSH_CACHE_IPI);

	/* Spin until every targeted CPU has acknowledged by clearing its
	 * bit; mb() forces a fresh read of the map each iteration. */
	while (!cpumask_empty(&smp_cache_ipi_map))
		/* nothing. lockup detection does not belong here */
		mb();
}
int smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait) /* * [SUMMARY] Run a function on all other CPUs. * <func> The function to run. This must be fast and non-blocking. * <info> An arbitrary pointer to pass to the function. * <nonatomic> currently unused. * <wait> If true, wait (atomically) until function has completed on other CPUs. * [RETURNS] 0 on success, else a negative status code. Does not return until * remote CPUs are nearly ready to execute <<func>> or are or have executed. * * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. */ { struct call_data_struct data; int cpus = num_online_cpus()-1; if (!cpus) return 0; /* Can deadlock when called with interrupts disabled */ WARN_ON(irqs_disabled()); data.func = func; data.info = info; atomic_set(&data.started, 0); data.wait = wait; if (wait) atomic_set(&data.finished, 0); spin_lock(&call_lock); call_data = &data; wmb(); /* Send a message to all other CPUs and wait for them to respond */ send_IPI_allbutself(CALL_FUNCTION_VECTOR); /* Wait for response */ while (atomic_read(&data.started) != cpus) barrier(); if (wait) while (atomic_read(&data.finished) != cpus) barrier(); spin_unlock(&call_lock); return 0; }
/** * smp_call_function(): Run a function on all other CPUs. * @func: The function to run. This must be fast and non-blocking. * @info: An arbitrary pointer to pass to the function. * @nonatomic: currently unused. * @wait: If true, wait (atomically) until function has completed on other CPUs. * * Returns 0 on success, else a negative status code. Does not return until * remote CPUs are nearly ready to execute <<func>> or are or have executed. * * You must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. */ int smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait) { struct call_data_struct data; int cpus; /* Holding any lock stops cpus from going down. */ spin_lock(&call_lock); cpus = num_online_cpus() - 1; if (!cpus) { spin_unlock(&call_lock); return 0; } /* Can deadlock when called with interrupts disabled */ WARN_ON(irqs_disabled()); data.func = func; data.info = info; atomic_set(&data.started, 0); data.wait = wait; if (wait) atomic_set(&data.finished, 0); call_data = &data; mb(); /* Send a message to all other CPUs and wait for them to respond */ send_IPI_allbutself(CALL_FUNCTION_VECTOR); /* Wait for response */ while (atomic_read(&data.started) != cpus) barrier(); if (wait) while (atomic_read(&data.finished) != cpus) barrier(); spin_unlock(&call_lock); return 0; }
/* * this function sends a 'generic call function' IPI to all other CPUs * in the system. */ static void __smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait) { struct call_data_struct data; int cpus = num_online_cpus()-1; if (!cpus) return; data.func = func; data.info = info; atomic_set(&data.started, 0); data.wait = wait; if (wait) atomic_set(&data.finished, 0); call_data = &data; wmb(); /* Send a message to all other CPUs and wait for them to respond */ send_IPI_allbutself(CALL_FUNCTION_VECTOR); /* Wait for response */ while (atomic_read(&data.started) != cpus) #ifndef CONFIG_XEN cpu_relax(); #else barrier(); #endif if (wait) while (atomic_read(&data.finished) != cpus) #ifndef CONFIG_XEN cpu_relax(); #else barrier(); #endif }
/* Kick every other CPU with an IPI_CPU_START interrupt. */
static inline void smp_send_start(void)
{
	send_IPI_allbutself(IPI_CPU_START);
}
/*
 * Ask all other CPUs to stop by sending them an IPI_CPU_STOP interrupt.
 *
 * NOTE(review): non-static `inline` on an externally visible definition
 * in a .c file has linkage subtleties — confirm this matches the
 * declaration used by callers.
 */
inline void smp_send_stop(void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}
/* Deliver an NMI (APIC delivery mode APIC_DM_NMI) to all CPUs but this one. */
static void smp_send_nmi_allbutself(void)
{
	send_IPI_allbutself(APIC_DM_NMI);
}
/*
 * this function sends a 'reschedule' IPI to all other CPUs.
 * This is used when RT tasks are starving and other CPUs
 * might be able to run them:
 */
void smp_send_reschedule_allbutself(void)
{
	send_IPI_allbutself(RESCHEDULE_VECTOR);
}
/* Broadcast the KDB IPI so all other CPUs stop in the kernel debugger. */
void smp_kdb_stop(void)
{
	send_IPI_allbutself(KDB_VECTOR);
}
/*
 * Send an IPI_NOP to every other CPU: the interrupt itself is the point
 * (presumably to jolt them out of a wait state — confirm against the
 * IPI_NOP handler on this architecture).
 */
void smp_send_all_nop(void)
{
	send_IPI_allbutself(IPI_NOP);
}
/*
 * smp_send_timer - send a LOCAL_TIMER_IPI to all other CPUs in the system.
 *
 * Returns: void (cannot fail).
 *
 * NOTE(review): this architecture's send_IPI_allbutself() takes a second
 * argument (here 1 — presumably a "try"/retry flag; confirm against the
 * local definition).
 */
void smp_send_timer(void)
{
	send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
}
/*
 * Run @func on all other CPUs via IPI_CALL_FUNC.
 *
 * @func:  function to run remotely; must be fast and non-blocking.
 * @info:  opaque argument passed to @func.
 * @retry: if true, spin until any in-flight call drains instead of
 *         failing with -EBUSY.
 * @wait:  if true, also wait for @func to complete on all CPUs.
 *
 * Returns 0 on success, -EBUSY if another call is in flight and @retry
 * is false.
 *
 * NOTE(review): if a CPU never starts, the `goto retry` path below loops
 * forever, re-printing the timeout message every HZ — there is no bail-out.
 * NOTE(review): when @wait is false this function can return while remote
 * CPUs still decrement unfinished_count inside the on-stack `data` —
 * looks like a potential use-after-return; confirm the IPI handler's
 * access pattern.
 */
int smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	struct smp_call_struct data;
	unsigned long timeout;
	static DEFINE_SPINLOCK(lock);	/* serialises use of smp_call_function_data */
	int retries = 0;

	/* Nobody else to call on a uniprocessor. */
	if (num_online_cpus() < 2)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* can also deadlock if IPIs are disabled */
	WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);

	data.func = func;
	data.info = info;
	data.wait = wait;
	/* Each remote CPU decrements these as it starts / finishes. */
	atomic_set(&data.unstarted_count, num_online_cpus() - 1);
	atomic_set(&data.unfinished_count, num_online_cpus() - 1);

	if (retry) {
		/* Busy-wait (holding the lock) for any previous call to drain. */
		spin_lock (&lock);
		while (smp_call_function_data != 0)
			barrier();
	}
	else {
		/* Non-retry mode: bail out if another call is in flight. */
		spin_lock (&lock);
		if (smp_call_function_data) {
			spin_unlock (&lock);
			return -EBUSY;
		}
	}

	smp_call_function_data = &data;
	spin_unlock (&lock);

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(IPI_CALL_FUNC);

 retry:
	/* Wait for response */
	timeout = jiffies + HZ;
	while ( (atomic_read (&data.unstarted_count) > 0) &&
		time_before (jiffies, timeout) )
		barrier ();

	if (atomic_read (&data.unstarted_count) > 0) {
		printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
		      smp_processor_id(), ++retries);
		goto retry;
	}
	/* We either got one or timed out. Release the lock */

	mb();
	smp_call_function_data = NULL;

	/* Optionally wait for every CPU to finish running @func. */
	while (wait && atomic_read (&data.unfinished_count) > 0)
			barrier ();

	return 0;
}
/* Stop the other CPUs for KDB, unless IPI use has been disabled. */
void smp_kdb_stop(void)
{
	if (KDB_FLAG(NOIPI))
		return;

	send_IPI_allbutself(KDB_VECTOR);
}
/* Send an NMI_VECTOR IPI to every CPU except the current one. */
static void smp_send_nmi_allbutself(void)
{
	send_IPI_allbutself(NMI_VECTOR);
}
/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void smp_send_stop (void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
	/* From here on only this CPU is counted as alive. */
	smp_num_cpus = 1;
}
/*
 * Thin wrapper exposing send_IPI_allbutself() for an arbitrary vector
 * (presumably to the I-pipe/Adeos layer, given the __ipipe prefix —
 * confirm against its callers).
 */
void __ipipe_send_IPI_allbutself (int vector)
{
	send_IPI_allbutself(vector);
}