static void poll_channel(struct vmbus_channel *channel)
{
	if (channel->target_cpu != smp_processor_id())
		smp_call_function_single(channel->target_cpu,
					 hv_kvp_onchannelcallback,
					 channel, true);
	else
		hv_kvp_onchannelcallback(channel);
}
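/*
 * The helper above is one instance of a pattern that recurs throughout
 * this section: run a function on a specific CPU, skipping the IPI when
 * the caller is already there.  A minimal generic sketch follows;
 * my_worker() and my_data are hypothetical stand-ins, and preemption is
 * disabled so smp_processor_id() and the comparison stay stable.
 */
static void my_worker(void *info);	/* hypothetical worker */

static void run_on_cpu(int cpu, void *my_data)
{
	preempt_disable();
	if (cpu == smp_processor_id())
		my_worker(my_data);	/* already on the target CPU */
	else
		smp_call_function_single(cpu, my_worker, my_data, 1);
	preempt_enable();
}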
static int hw_breakpoint_reset_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	if (action == CPU_ONLINE)
		smp_call_function_single(cpu, reset_ctrl_regs, NULL, 1);
	return NOTIFY_OK;
}
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpu_isset(*oncpu, cpu_online_map))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
					 &reason, 1);
}
static void bts_trace_start(struct trace_array *tr)
{
	int cpu;

	tracing_reset_online_cpus(tr);

	for_each_cpu(cpu, cpu_possible_mask)
		smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
}
static inline void do_cpuid(int cpu, u32 reg, u32 *data)
{
	struct cpuid_command cmd;

	cmd.reg = reg;
	cmd.data = data;
	smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1);
}
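/*
 * do_cpuid() above uses the old five-argument form of
 * smp_call_function_single(); the extra argument was "retry", dropped in
 * 2.6.27 (the version guards in RTMpOnSpecific() below make the same
 * distinction).  Assuming a current kernel, a sketch of the equivalent
 * call (do_cpuid_modern is a hypothetical name):
 */
static inline void do_cpuid_modern(int cpu, u32 reg, u32 *data)
{
	struct cpuid_command cmd;

	cmd.reg = reg;
	cmd.data = data;
	smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1 /* wait */);
}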
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    int rc;
    RTMPARGS Args;
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = idCpu;
    Args.cHits     = 0;

    if (!RTMpIsCpuPossible(idCpu))
        return VERR_CPU_NOT_FOUND;

    RTThreadPreemptDisable(&PreemptState);
    if (idCpu != RTMpCpuId())
    {
        if (RTMpIsCpuOnline(idCpu))
        {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
            rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
            rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#else /* older kernels */
            rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#endif /* older kernels */
            Assert(rc == 0);
            rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
        }
        else
            rc = VERR_CPU_OFFLINE;
    }
    else
    {
        rtmpLinuxWrapper(&Args);
        rc = VINF_SUCCESS;
    }
    RTThreadPreemptRestore(&PreemptState);
    return rc;
}
static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
/**
 * Save current fixed-range MTRR state of the BSP
 */
void mtrr_save_state(void)
{
	int cpu = get_cpu();

	if (cpu == 0)
		mtrr_save_fixed_ranges(NULL);
	else
		smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
	put_cpu();
}
static int cpu_vsyscall_notifier(struct notifier_block *n,
				 unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
	return NOTIFY_DONE;
}
/* Select a source on the primary MUX. */
static void set_pri_clk_src(struct scalable *sc, u32 pri_src_sel)
{
	int cpu = sc - drv.scalable;

	if (sc != &drv.scalable[L2] && cpu_online(cpu)) {
		struct set_clk_src_args args = {
			.sc = sc,
			.src_sel = pri_src_sel,
		};
		smp_call_function_single(cpu, __set_cpu_pri_clk_src, &args, 1);
	} else {
		/* L2 or offline CPU: program the MUX from this CPU. */
		__set_pri_clk_src(sc, pri_src_sel);
	}
}
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err = -EIO;
	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
/**
 * speedstep_target - set a new CPUFreq policy
 * @policy: new policy
 * @index: index of target frequency
 *
 * Sets a new CPUFreq policy.
 */
static int speedstep_target(struct cpufreq_policy *policy, unsigned int index)
{
	unsigned int policy_cpu;

	policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);

	smp_call_function_single(policy_cpu, _speedstep_set_state, &index,
				 true);

	return 0;
}
static int __cpuinit cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		set_cpu_irq_affinity(cpu);
		smp_call_function_single(cpu, start_pmu, &pmu_paused, 1);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		smp_call_function_single(cpu, stop_pmu, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}
static unsigned int speedstep_get(unsigned int cpu)
{
	unsigned int speed;

	/* You're supposed to ensure CPU is online. */
	if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
		BUG();

	pr_debug("detected %u kHz as current frequency\n", speed);
	return speed;
}
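/*
 * speedstep_get() calls BUG() if the cross-call fails, which is only
 * safe because its callers guarantee the CPU is online.
 * smp_call_function_single() returns -ENXIO for an offline CPU, so a
 * caller without that guarantee might prefer a soft failure; a hedged
 * sketch (speedstep_get_checked is a hypothetical name):
 */
static unsigned int speedstep_get_checked(unsigned int cpu)
{
	unsigned int speed = 0;

	/* Report 0 ("unknown") instead of crashing if the CPU is gone. */
	if (smp_call_function_single(cpu, get_freq_data, &speed, 1))
		return 0;
	return speed;
}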
static int twd_cpufreq_transition(struct notifier_block *nb,
				  unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
		smp_call_function_single(freqs->cpu, twd_update_frequency,
					 NULL, 1);

	return NOTIFY_OK;
}
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	int cpu = get_cpu();

	if (cpu == *oncpu)
		tick_do_broadcast_on_off(&reason);
	else
		smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
					 &reason, 1, 1);
	put_cpu();
}
static int jtag_mm_etm_probe(struct platform_device *pdev, uint32_t cpu)
{
	struct etm_ctx *etmdata;
	struct resource *res;
	struct device *dev = &pdev->dev;

	/* Allocate memory per cpu */
	etmdata = devm_kzalloc(dev, sizeof(struct etm_ctx), GFP_KERNEL);
	if (!etmdata)
		return -ENOMEM;

	etm[cpu] = etmdata;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "etm-base");
	if (!res)
		return -ENODEV;

	etmdata->base = devm_ioremap(dev, res->start, resource_size(res));
	if (!etmdata->base)
		return -EINVAL;

	/* Allocate etm state save space per core */
	etmdata->state = devm_kzalloc(dev,
				      MAX_ETM_STATE_SIZE * sizeof(uint64_t),
				      GFP_KERNEL);
	if (!etmdata->state)
		return -ENOMEM;

	spin_lock_init(&etmdata->spinlock);
	mutex_init(&etmdata->mutex);

	if (cnt++ == 0)
		register_hotcpu_notifier(&jtag_mm_etm_notifier);

	if (!smp_call_function_single(cpu, etm_init_arch_data, etmdata, 1))
		etmdata->init = true;

	if (etmdata->init) {
		mutex_lock(&etmdata->mutex);
		if (etm_arch_supported(etmdata->arch)) {
			if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) <
			    TZ_DBG_ETM_VER)
				etmdata->save_restore_enabled = true;
			else
				pr_info("etm save-restore supported by TZ\n");
		} else
			pr_info("etm arch %u not supported\n", etmdata->arch);
		etmdata->enable = true;
		mutex_unlock(&etmdata->mutex);
	}
	return 0;
}
static ssize_t show_pw20_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u32 value;
	unsigned int cpu = dev->id;

	smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);

	value &= PWRMGTCR0_PW20_WAIT;

	return sprintf(buf, "%u\n", value ? 1 : 0);
}
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
	*l = rv.l;
	*h = rv.h;

	return err ? err : rv.err;
}
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	rv.msr_no = msr_no;
	rv.l = l;
	rv.h = h;
	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
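/*
 * A usage sketch for the two *_safe_on_cpu() helpers above: read an MSR
 * on a remote CPU, tolerate a faulting rdmsr, and write back a modified
 * value.  MSR_MY_CTL and the toggled bit are hypothetical placeholders.
 */
#define MSR_MY_CTL 0x123	/* hypothetical MSR index */

static int toggle_my_ctl_bit(unsigned int cpu)
{
	u32 l, h;
	int err;

	err = rdmsr_safe_on_cpu(cpu, MSR_MY_CTL, &l, &h);
	if (err)
		return err;	/* offline CPU or faulting rdmsr */

	return wrmsr_safe_on_cpu(cpu, MSR_MY_CTL, l ^ 0x1, h);
}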
static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

#ifdef CONFIG_HOTPLUG_CPU
	WRITE_ONCE(cpu_start_id, cpu);
	/* Pairs with the third memw in the cpu_restart */
	mb();
	system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
					     sizeof(cpu_start_id));
#endif
	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		do
			ccount = get_ccount();
		while (!ccount);

		WRITE_ONCE(cpu_start_ccount, ccount);

		do {
			/*
			 * Pairs with the first two memws in the
			 * .Lboot_secondary.
			 */
			mb();
			ccount = READ_ONCE(cpu_start_ccount);
		} while (ccount && time_before(jiffies, timeout));

		if (ccount) {
			smp_call_function_single(0, mx_cpu_stop,
						 (void *)cpu, 1);
			WRITE_ONCE(cpu_start_ccount, 0);
			return -EIO;
		}
	}
	return 0;
}
/*
 * return:
 *  0: enable
 *  1: disable
 * -1: error
 */
int kfm_cache_status_on_cpu(int cpu)
{
	int ret;

	g_cache_status = 0;
	ret = smp_call_function_single(cpu, __kfm_caches_status, NULL, 1);
	if (ret)
		return ret;

	KFM_DBG(6, "get cpu %d cache status", cpu);
	return g_cache_status;
}
void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
bool PlatformRunOnAllCpus(PlatformOnAllCpusFunc func, void *data)
{
   int cpu;
   RunOnAllCpusCtxt cpuCtxt;

   cpuCtxt.func = func;
   cpuCtxt.data = data;

   for_each_online_cpu(cpu) {
      cpuCtxt.result = false;
      smp_call_function_single(cpu, RunOnAllCpusWorker, &cpuCtxt, 1);
      if (!cpuCtxt.result)
         return false;
   }
   return true;
}
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		hrtimer_restart(timer);
	} else if (!rq->hrtick_csd_pending) {
		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
		rq->hrtick_csd_pending = 1;
	}
}
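/*
 * The csd branch above relies on a call_single_data structure that was
 * initialised once at runqueue-setup time, so the remote kick needs no
 * allocation and does not wait for completion.  A sketch of that setup,
 * assuming the field layout used by this era of the scheduler
 * (init_rq_hrtick_csd is a hypothetical name):
 */
static void init_rq_hrtick_csd(struct rq *rq)
{
	rq->hrtick_csd_pending = 0;
	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;	/* remote-side handler */
	rq->hrtick_csd.info = rq;
}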
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err;
}
static void update_timers(int cpu)
{
	/*
	 * Make sure that the perf event counter will adapt to the new
	 * sampling period.  Updating the sampling period directly would
	 * be much nicer, but we do not have an API for that now, so
	 * let's use a big hammer.
	 * The hrtimer will adopt the new period on the next tick, but
	 * this might already be late, so we have to restart the timer
	 * as well.
	 */
	watchdog_nmi_disable(cpu);
	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
	watchdog_nmi_enable(cpu);
}
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	preempt_disable();
	if (smp_processor_id() == cpu)
		wrmsr(msr_no, l, h);
	else {
		struct msr_info rv;

		rv.msr_no = msr_no;
		rv.l = l;
		rv.h = h;
		smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
	}
	preempt_enable();
}
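/*
 * wrmsr_on_cpu() appears in this section in both its old form (above,
 * five-argument call) and its current form (earlier, four-argument
 * call).  Out-of-tree code that must build against both eras typically
 * version-guards the call site, as RTMpOnSpecific() does; a minimal
 * sketch (wrmsr_remote is a hypothetical wrapper):
 */
#include <linux/version.h>

static void wrmsr_remote(unsigned int cpu, struct msr_info *rv)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
	smp_call_function_single(cpu, __wrmsr_on_cpu, rv, 1);
#else
	smp_call_function_single(cpu, __wrmsr_on_cpu, rv, 0, 1);
#endif
}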
int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
{
	struct msm_spm_vdd_info info;
	int ret;

	info.cpu = cpu;
	info.vlevel = vlevel;

	/* Set wait to true to block on vdd change */
	ret = smp_call_function_single(cpu, msm_spm_smp_set_vdd, &info, true);
	if (!ret)
		ret = info.err;

	return ret;
}