/*
 * Per-CPU teardown: mask the PPI(s) this driver requested on the local
 * CPU, then push the clockevent into the shutdown state.
 */
static void arch_timer_stop(struct clock_event_device *clk)
{
	unsigned int cpu = smp_processor_id();

	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", clk->irq, cpu);

	/* Always mask the primary PPI; the non-secure one only if present. */
	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}
static void arch_timer_stop(struct clock_event_device *clk) { pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id()); if (arch_timer_use_virtual) disable_percpu_irq(arch_timer_ppi[VIRT_PPI]); else { disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]); if (arch_timer_ppi[PHYS_NONSECURE_PPI]) disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]); } clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk); }
/*
 * Mode changes are handled by simply masking our per-CPU interrupt and
 * waiting for the next event to be programmed; nothing else needs to be
 * reconfigured in hardware here.
 */
static int nps_clkevent_set_state(struct clock_event_device *dev)
{
	nps_clkevent_rm_thread();
	disable_percpu_irq(nps_timer0_irq);

	return 0;
}
/*
 * Stop the TWD local timer on the calling CPU: mark its clockevent
 * unused, then mask its per-CPU interrupt.
 */
static void twd_timer_stop(void)
{
	struct clock_event_device *evt = __this_cpu_ptr(twd_evt);

	twd_set_mode(CLOCK_EVT_MODE_UNUSED, evt);
	disable_percpu_irq(evt->irq);
}
/*
 * Tear down this CPU's local timer: mark the clockevent unused, detach
 * this CPU's irqaction, then mask the per-CPU interrupt.
 */
static void tegra_local_timer_stop(struct clock_event_device *evt)
{
	unsigned int this_cpu = smp_processor_id();

	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
	remove_irq(evt->irq, &tegra_cputimer_irq[this_cpu]);
	disable_percpu_irq(evt->irq);
}
/*
 * CPU hotplug (dying) callback: shut down the outgoing CPU's clockevent
 * and mask its per-CPU timer interrupt.
 */
static int msm_local_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = per_cpu_ptr(msm_evt, cpu);

	clk->set_state_shutdown(clk);
	disable_percpu_irq(clk->irq);

	return 0;
}
/*
 * SMP cross-call callback: detach the calling CPU from the PMU IRQ.
 *
 * Clears this CPU from the PMU's active-IRQ mask and masks the per-CPU
 * interrupt locally.  platform_get_irq() returns a negative errno when
 * the device has no IRQ resource, so guard against handing a negative
 * value to disable_percpu_irq().
 */
static void cpu_pmu_disable_percpu_irq(void *data)
{
	struct arm_pmu *cpu_pmu = data;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	int irq = platform_get_irq(pmu_device, 0);

	cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
	if (irq >= 0)
		disable_percpu_irq(irq);
}
/*
 * Stop this CPU's MCT local timer.
 *
 * Marks the clockevent unused, then releases the interrupt: in SPI mode
 * each CPU has its own irqaction to detach (tick0 for CPU0, tick1
 * otherwise); in PPI mode the shared per-CPU IRQ is simply masked.
 *
 * The nested if/else was previously unbraced, leaving the else binding
 * to visual inspection (dangling-else hazard); braces make the intended
 * structure explicit without changing behavior.
 */
static void exynos4_local_timer_stop(struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();

	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
	if (mct_int_type == MCT_INT_SPI) {
		if (cpu == 0)
			remove_irq(evt->irq, &mct_tick0_event_irq);
		else
			remove_irq(evt->irq, &mct_tick1_event_irq);
	} else {
		disable_percpu_irq(IRQ_MCT_LOCALTIMER);
	}
}
/*
 * CPU hotplug notifier for the KVM architected timer: set up the timer
 * interrupt when a CPU comes up, mask it when the CPU goes down.  All
 * other hotplug events are ignored.
 */
static int kvm_timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *cpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		kvm_timer_init_interrupt(NULL);
	else if (action == CPU_DYING || action == CPU_DYING_FROZEN)
		disable_percpu_irq(timer_irq.irq);

	return NOTIFY_OK;
}
static int arc_timer_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); evt->cpumask = cpumask_of(smp_processor_id()); switch (action & ~CPU_TASKS_FROZEN) { case CPU_STARTING: clockevents_config_and_register(evt, arc_timer_freq, 0, ULONG_MAX); enable_percpu_irq(arc_timer_irq, 0); break; case CPU_DYING: disable_percpu_irq(arc_timer_irq); break; } return NOTIFY_OK; }
/*
 * CPU hotplug notifier for the architected timer: bring up the per-CPU
 * clockevent when a CPU starts, and mask its IRQ plus mark the
 * clockevent unused when the CPU dies.
 */
static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
					   unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	struct clock_event_device *evt = per_cpu_ptr(&arch_timer_evt, cpu);

	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
		arch_timer_setup(evt);
	} else if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
		pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
			 evt->irq, cpu);
		disable_percpu_irq(evt->irq);
		arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, evt);
	}

	return NOTIFY_OK;
}
/*
 * Stop the global timer clockevent on this CPU: shut the comparator
 * down first, then mask the per-CPU interrupt.
 */
static void gt_clockevents_stop(struct clock_event_device *clk)
{
	gt_clockevent_shutdown(clk);
	disable_percpu_irq(clk->irq);
}
/*
 * Stop the local timer on this CPU: mark the clockevent unused, then
 * mask its per-CPU interrupt.
 */
static void armada_370_xp_timer_stop(struct clock_event_device *evt)
{
	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
	disable_percpu_irq(evt->irq);
}
/*
 * CPU hotplug (dying) callback: mask the NPS timer0 interrupt on the
 * outgoing CPU.
 */
static int nps_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(nps_timer0_irq);

	return 0;
}
/*
 * CPU hotplug (dying) callback: mask the C-SKY multi-processor timer
 * interrupt on the outgoing CPU.
 */
static int csky_mptimer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(csky_mptimer_irq);

	return 0;
}
/*
 * Stop the global timer clockevent on this CPU: mark it unused, then
 * mask its per-CPU interrupt.
 */
static void gt_clockevents_stop(struct clock_event_device *clk)
{
	gt_clockevent_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
	disable_percpu_irq(clk->irq);
}
/*
 * SMP cross-call callback: mask the PMU's per-CPU interrupt on the
 * calling CPU.  @data points at the IRQ number.
 */
static void cpu_pmu_disable_percpu_irq(void *data)
{
	disable_percpu_irq(*(int *)data);
}
/*
 * CPU hotplug (dying) callback: mask the VGIC maintenance interrupt on
 * the outgoing CPU.
 */
static int vgic_init_cpu_dying(unsigned int cpu)
{
	disable_percpu_irq(kvm_vgic_global_state.maint_irq);

	return 0;
}
/*
 * Stop the TWD local timer behind @clk: mark the clockevent unused,
 * then mask its per-CPU interrupt.
 */
static void twd_timer_stop(struct clock_event_device *clk)
{
	twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
	disable_percpu_irq(clk->irq);
}
/*
 * Stop the local timer on this CPU: put the clockevent into the
 * shutdown state, then mask its per-CPU interrupt.
 */
static void armada_370_xp_timer_stop(struct clock_event_device *evt)
{
	evt->set_state_shutdown(evt);
	disable_percpu_irq(evt->irq);
}
/*
 * Per-CPU clockevent teardown: mask the GIC timer interrupt on the
 * local CPU.  @cd is unused; the IRQ is a file-scope global.
 */
static void gic_clockevent_cpu_exit(struct clock_event_device *cd)
{
	disable_percpu_irq(gic_timer_irq);
}
/*
 * CPU hotplug (dying) callback: mask the ARC timer interrupt on the
 * outgoing CPU.
 */
static int arc_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(arc_timer_irq);

	return 0;
}