static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
	clk->name = "arch_sys_timer";
	clk->rating = 450;
	if (arch_timer_use_virtual) {
		clk->irq = arch_timer_ppi[VIRT_PPI];
		clk->set_mode = arch_timer_set_mode_virt;
		clk->set_next_event = arch_timer_set_next_event_virt;
	} else {
		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
		clk->set_mode = arch_timer_set_mode_phys;
		clk->set_next_event = arch_timer_set_next_event_phys;
	}

	clk->cpumask = cpumask_of(smp_processor_id());

	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);

	clockevents_config_and_register(clk, arch_timer_rate,
					0xf, 0x7fffffff);

	if (arch_timer_use_virtual)
		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
	else {
		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
	}

	arch_counter_set_user_access();

	return 0;
}
/*
 * clockevent setup for boot CPU
 */
static void __init arc_clockevent_setup(struct device_node *node)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int ret;

	register_cpu_notifier(&arc_timer_cpu_nb);

	arc_timer_irq = irq_of_parse_and_map(node, 0);
	if (arc_timer_irq <= 0)
		panic("clockevent: missing irq");

	ret = arc_get_timer_clk(node);
	if (ret)
		panic("clockevent: missing clk");

	evt->irq = arc_timer_irq;
	evt->cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX);

	/* Needs apriori irq_set_percpu_devid() done in intc map function */
	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
				 "Timer0 (per-cpu-tick)", evt);
	if (ret)
		panic("clockevent: unable to request irq\n");

	enable_percpu_irq(arc_timer_irq, 0);
}
static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	if (arch_timer_use_virtual)
		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
	else {
		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
	}

	arch_counter_set_user_access();

	return 0;
}
static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
	struct clock_event_device **this_cpu_clk;

	if (!twd_clk)
		twd_clk = twd_get_clock();

	if (!IS_ERR_OR_NULL(twd_clk))
		twd_timer_rate = clk_get_rate(twd_clk);
	else
		twd_calibrate_rate();

	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);

	clk->name = "local_timer";
	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
			CLOCK_EVT_FEAT_C3STOP;
	clk->rating = 350;
	clk->set_mode = twd_set_mode;
	clk->set_next_event = twd_set_next_event;
	clk->irq = twd_ppi;

	this_cpu_clk = __this_cpu_ptr(twd_evt);
	*this_cpu_clk = clk;

	clockevents_config_and_register(clk, twd_timer_rate,
					0xf, 0xffffffff);
	enable_percpu_irq(clk->irq, 0);

	return 0;
}
static int __cpuinit msm_local_timer_setup(struct clock_event_device *evt)
{
	/* Use existing clock_event for cpu 0 */
	if (!smp_processor_id())
		return 0;

	writel_relaxed(0, event_base + TIMER_ENABLE);
	writel_relaxed(0, event_base + TIMER_CLEAR);
	writel_relaxed(~0, event_base + TIMER_MATCH_VAL);
	evt->irq = msm_clockevent.irq;
	evt->name = "local_timer";
	evt->features = msm_clockevent.features;
	evt->rating = msm_clockevent.rating;
	evt->set_mode = msm_timer_set_mode;
	evt->set_next_event = msm_timer_set_next_event;
	evt->shift = msm_clockevent.shift;
	evt->mult = div_sc(GPT_HZ, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(0xf0000000, evt);
	evt->min_delta_ns = clockevent_delta2ns(4, evt);

	*__this_cpu_ptr(msm_evt.percpu_evt) = evt;
	clockevents_register_device(evt);
	enable_percpu_irq(evt->irq, 0);
	return 0;
}
void arc_request_percpu_irq(int irq, int cpu,
			    irqreturn_t (*isr)(int irq, void *dev),
			    const char *irq_nm,
			    void *percpu_dev)
{
	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		/*
		 * These 2 calls are essential to making percpu IRQ APIs work
		 * Ideally these details could be hidden in irq chip map function
		 * but the issue is IPIs IRQs being static (non-DT) and platform
		 * specific, so we can't identify them there.
		 */
		irq_set_percpu_devid(irq);
		irq_modify_status(irq, IRQ_NOAUTOEN, 0);  /* @irq, @clr, @set */

		rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
		if (rc)
			panic("Percpu IRQ request failed for %d\n", irq);
	}

	enable_percpu_irq(irq, 0);
}
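/*
 * Editor's sketch, not from the kernel tree: the comment inside
 * arc_request_percpu_irq() above spells out the percpu IRQ contract --
 * mark the descriptor per-CPU-devid once, request the handler once with
 * a __percpu dev_id, then have each CPU enable its local copy. A minimal
 * self-contained version of that sequence follows; my_handler,
 * my_percpu_irq_setup and my_percpu_data are hypothetical names, only
 * the API calls themselves are real.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t my_handler(int irq, void *dev_id)
{
	/* dev_id is the calling CPU's slice of the __percpu dev_id */
	return IRQ_HANDLED;
}

static int my_percpu_irq_setup(int my_irq, void __percpu *my_percpu_data)
{
	int rc;

	/* Once, from one CPU: mark the irq_desc as per-CPU-devid. */
	irq_set_percpu_devid(my_irq);

	/* Once, from one CPU: install the handler for all CPUs. */
	rc = request_percpu_irq(my_irq, my_handler, "my-percpu-irq",
				my_percpu_data);
	if (rc)
		return rc;

	/* On each CPU that wants the interrupt: enable the local copy;
	 * a type of 0 (IRQ_TYPE_NONE) keeps the configured trigger. */
	enable_percpu_irq(my_irq, 0);
	return 0;
}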
/*
 * Setup the local clock events for a CPU.
 */
static int armada_370_xp_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
	u32 clr = 0, set = 0;

	if (timer25Mhz)
		set = TIMER0_25MHZ;
	else
		clr = TIMER0_25MHZ;

	local_timer_ctrl_clrset(clr, set);

	evt->name = "armada_370_xp_per_cpu_tick";
	evt->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
	evt->shift = 32;
	evt->rating = 300;
	evt->set_next_event = armada_370_xp_clkevt_next_event;
	evt->set_state_shutdown = armada_370_xp_clkevt_shutdown;
	evt->set_state_periodic = armada_370_xp_clkevt_set_periodic;
	evt->set_state_oneshot = armada_370_xp_clkevt_shutdown;
	evt->tick_resume = armada_370_xp_clkevt_shutdown;
	evt->irq = armada_370_xp_clkevt_irq;
	evt->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
	enable_percpu_irq(evt->irq, 0);

	return 0;
}
/*
 * Setup the local clock events for a CPU.
 */
static int armada_370_xp_timer_setup(struct clock_event_device *evt)
{
	u32 clr = 0, set = 0;
	int cpu = smp_processor_id();

	if (timer25Mhz)
		set = TIMER0_25MHZ;
	else
		clr = TIMER0_25MHZ;

	local_timer_ctrl_clrset(clr, set);

	evt->name = "armada_370_xp_per_cpu_tick";
	evt->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
	evt->shift = 32;
	evt->rating = 300;
	evt->set_next_event = armada_370_xp_clkevt_next_event;
	evt->set_mode = armada_370_xp_clkevt_mode;
	evt->irq = armada_370_xp_clkevt_irq;
	evt->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
	enable_percpu_irq(evt->irq, 0);

	return 0;
}
/*
 * Setup the local clock events for a CPU.
 */
static int __cpuinit generic_timer_setup(struct clock_event_device *clk)
{
	struct clock_event_device **this_cpu_clk;

	pr_info("[ca7_timer]%s entry\n", __func__);

	generic_timer_calibrate_rate();

	write_cntp_ctl(0x0);

	clk->name = "generic_timer";
	clk->features = CLOCK_EVT_FEAT_ONESHOT;
	clk->rating = 350;
	clk->set_mode = generic_timer_set_mode;
	clk->set_next_event = generic_timer_set_next_event;
	clk->irq = timer_ppi;

	this_cpu_clk = __this_cpu_ptr(timer_evt);
	*this_cpu_clk = clk;

	clockevents_config_and_register(clk, generic_timer_rate,
					0xf, 0x7fffffff);
	enable_percpu_irq(clk->irq, 0);

	return 0;
}
/*
 * Setup the local clock events for a CPU.
 */
static int armada_370_xp_timer_setup(struct clock_event_device *evt)
{
	u32 u;
	int cpu = smp_processor_id();

	u = readl(local_base + TIMER_CTRL_OFF);
	if (timer25Mhz)
		writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
	else
		writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);

	evt->name = "armada_370_xp_per_cpu_tick";
	evt->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
	evt->shift = 32;
	evt->rating = 300;
	evt->set_next_event = armada_370_xp_clkevt_next_event;
	evt->set_mode = armada_370_xp_clkevt_mode;
	evt->irq = armada_370_xp_clkevt_irq;
	evt->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
	enable_percpu_irq(evt->irq, 0);

	return 0;
}
static int msm_local_timer_setup(struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	int err;

	evt->irq = msm_timer_irq;
	evt->name = "msm_timer";
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 200;
	evt->set_mode = msm_timer_set_mode;
	evt->set_next_event = msm_timer_set_next_event;
	evt->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff);

	if (msm_timer_has_ppi) {
		enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
	} else {
		err = request_irq(evt->irq, msm_timer_interrupt,
				  IRQF_TIMER | IRQF_NOBALANCING |
				  IRQF_TRIGGER_RISING, "gp_timer", evt);
		if (err)
			pr_err("request_irq failed\n");
	}

	return 0;
}
static int msm_local_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
	int err;

	evt->irq = msm_timer_irq;
	evt->name = "msm_timer";
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 200;
	evt->set_state_shutdown = msm_timer_shutdown;
	evt->set_state_oneshot = msm_timer_shutdown;
	evt->tick_resume = msm_timer_shutdown;
	evt->set_next_event = msm_timer_set_next_event;
	evt->cpumask = cpumask_of(cpu);

	clockevents_config_and_register(evt, GPT_HZ, 4, 0xffffffff);

	if (msm_timer_has_ppi) {
		enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
	} else {
		err = request_irq(evt->irq, msm_timer_interrupt,
				  IRQF_TIMER | IRQF_NOBALANCING |
				  IRQF_TRIGGER_RISING, "gp_timer", evt);
		if (err)
			pr_err("request_irq failed\n");
	}

	return 0;
}
static int nps_clkevent_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	nps_clkevent_add_thread(delta);
	enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE);

	return 0;
}
static int arch_timer_setup(struct clock_event_device *clk)
{
	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	if (arch_timer_use_virtual)
		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
	else {
		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
	}

	arch_counter_set_user_access();

	if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
		arch_timer_configure_evtstream();

	return 0;
}
static void cpu_pmu_enable_percpu_irq(void *data)
{
	struct arm_pmu *cpu_pmu = data;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	int irq = platform_get_irq(pmu_device, 0);

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
}
static int arc_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);

	evt->cpumask = cpumask_of(smp_processor_id());

	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
	enable_percpu_irq(arc_timer_irq, 0);
	return 0;
}
/*
 * Setup the local clock events for a CPU.
 */
static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
	struct clock_event_device **this_cpu_clk;
	int cpu = smp_processor_id();

	/*
	 * If the basic setup for this CPU has been done before don't
	 * bother with the below.
	 */
	if (per_cpu(percpu_setup_called, cpu)) {
		writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
		clockevents_register_device(*__this_cpu_ptr(twd_evt));
		enable_percpu_irq(clk->irq, 0);
		return 0;
	}
	per_cpu(percpu_setup_called, cpu) = true;

	twd_calibrate_rate();

	/*
	 * The following is done once per CPU the first time .setup() is
	 * called.
	 */
	writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);

	clk->name = "local_timer";
	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
			CLOCK_EVT_FEAT_C3STOP;
	clk->rating = 350;
	clk->set_mode = twd_set_mode;
	clk->set_next_event = twd_set_next_event;
	clk->irq = twd_ppi;

	this_cpu_clk = __this_cpu_ptr(twd_evt);
	*this_cpu_clk = clk;

	clockevents_config_and_register(clk, twd_timer_rate,
					0xf, 0xffffffff);
	enable_percpu_irq(clk->irq, 0);

	return 0;
}
static int nps_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = this_cpu_ptr(&nps_clockevent_device);

	evt->cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(evt, nps_timer0_freq, 0, ULONG_MAX);
	enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE);

	return 0;
}
static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}
/*
 * clock event for percpu
 */
static int csky_mptimer_starting_cpu(unsigned int cpu)
{
	struct timer_of *to = per_cpu_ptr(&csky_to, cpu);

	to->clkevt.cpumask = cpumask_of(cpu);

	clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
					2, ULONG_MAX);

	enable_percpu_irq(csky_mptimer_irq, 0);

	return 0;
}
static int local_timer_test(void *data)
{
	int cpu = *(int *)data;

	printk("[%s]: thread for cpu%d start\n", __func__, cpu);
	enable_percpu_irq(GIC_PPI_PRIVATE_TIMER, 0);

	while (1) {
		wait_for_completion(&notify[cpu]);
		test_operation();
		complete(&ack);
	}

	printk("[%s]: thread for cpu%d stop\n", __func__, cpu);

	return 0;
}
static void gic_clockevent_cpu_init(unsigned int cpu,
				    struct clock_event_device *cd)
{
	cd->name = "MIPS GIC";
	cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
	cd->rating = 350;
	cd->irq = gic_timer_irq;
	cd->cpumask = cpumask_of(cpu);
	cd->set_next_event = gic_next_event;

	clockevents_config_and_register(cd, gic_frequency, 0x300, 0x7fffffff);

	enable_percpu_irq(gic_timer_irq, IRQ_TYPE_NONE);
}
static int __cpuinit gt_clockevents_init(struct clock_event_device *clk)
{
	int cpu = smp_processor_id();

	clk->name = "arm_global_timer";
	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	clk->set_mode = gt_clockevent_set_mode;
	clk->set_next_event = gt_clockevent_set_next_event;
	clk->cpumask = cpumask_of(cpu);
	clk->rating = 300;
	clk->irq = gt_ppi;
	clockevents_config_and_register(clk, gt_clk_rate, 1, 0xffffffff);
	enable_percpu_irq(clk->irq, IRQ_TYPE_NONE);
	return 0;
}
static void __init kronos_clockevent_init(int irq)
{
	int ret;

	ret = setup_percpu_irq(irq, &kronos_timer_irq);
	if (ret) {
		pr_err("System Timer IRQ register failed: %d\n", ret);
		BUG();
	}

	enable_percpu_irq(irq, 0);

	clkevt.cpumask = cpumask_of(0);
	clkevt.irq = irq;
	clockevents_config_and_register(&clkevt, global_timer_freq,
					0xf, 0xffffffff);
}
static int __cpuinit tegra_local_timer_setup(struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();
	struct tegra_clock_event_device *clkevt;
	int ret;

	clkevt = this_cpu_ptr(&percpu_tegra_timer);
	clkevt->evt = evt;
	sprintf(clkevt->name, "tegra_timer%d", cpu);
	evt->name = clkevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->features = tegra_cputimer_clockevent.features;
	evt->rating = tegra_cputimer_clockevent.rating;
	evt->set_mode = tegra_cputimer_set_mode;
	evt->set_next_event = tegra_cputimer_set_next_event;
	clockevents_calc_mult_shift(evt, 1000000, 5);
	evt->max_delta_ns = clockevent_delta2ns(0x1fffffff, evt);
	evt->min_delta_ns = clockevent_delta2ns(0x1, evt);

	tegra_cputimer_irq[cpu].dev_id = clkevt;

	clockevents_register_device(evt);

	ret = setup_irq(tegra_cputimer_irq[cpu].irq, &tegra_cputimer_irq[cpu]);
	if (ret) {
		pr_err("Failed to register CPU timer IRQ for CPU %d: irq=%d, ret=%d\n",
		       cpu, tegra_cputimer_irq[cpu].irq, ret);
		return ret;
	}

	evt->irq = tegra_cputimer_irq[cpu].irq;
	ret = irq_set_affinity(tegra_cputimer_irq[cpu].irq, cpumask_of(cpu));
	if (ret) {
		pr_err("Failed to set affinity for CPU timer IRQ to CPU %d: irq=%d, ret=%d\n",
		       cpu, tegra_cputimer_irq[cpu].irq, ret);
		return ret;
	}

	enable_percpu_irq(evt->irq, IRQ_TYPE_LEVEL_HIGH);

	return 0;
}
static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;
	unsigned int cpu = smp_processor_id();

	mevt = this_cpu_ptr(&percpu_mct_tick);
	mevt->evt = evt;

	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
	sprintf(mevt->name, "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_mode = exynos4_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 450;

	clockevents_calc_mult_shift(evt, clk_rate / 2, 5);
	evt->max_delta_ns = clockevent_delta2ns(0x7fffffff, evt);
	evt->min_delta_ns = clockevent_delta2ns(0xf, evt);

	clockevents_register_device(evt);

	exynos4_mct_write(0x1, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {
		if (cpu == 0) {
			mct_tick0_event_irq.dev_id = mevt;
			evt->irq = IRQ_MCT_L0;
			setup_irq(IRQ_MCT_L0, &mct_tick0_event_irq);
		} else {
			mct_tick1_event_irq.dev_id = mevt;
			evt->irq = IRQ_MCT_L1;
			setup_irq(IRQ_MCT_L1, &mct_tick1_event_irq);
			irq_set_affinity(IRQ_MCT_L1, cpumask_of(1));
		}
	} else {
		enable_percpu_irq(IRQ_MCT_LOCALTIMER, 0);
	}

	return 0;
}
static void __init xen_percpu_init(void *unused)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpup;
	int err;
	int cpu = get_cpu();

	pr_info("Xen: initializing cpu%d\n", cpu);
	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);

	info.mfn = __pa(vcpup) >> PAGE_SHIFT;
	info.offset = offset_in_page(vcpup);

	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
	BUG_ON(err);
	per_cpu(xen_vcpu, cpu) = vcpup;

	enable_percpu_irq(xen_events_irq, 0);
}
static int arc_timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);

	evt->cpumask = cpumask_of(smp_processor_id());

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		clockevents_config_and_register(evt, arc_timer_freq,
						0, ULONG_MAX);
		enable_percpu_irq(arc_timer_irq, 0);
		break;
	case CPU_DYING:
		disable_percpu_irq(arc_timer_irq);
		break;
	}

	return NOTIFY_OK;
}
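/*
 * Editor's sketch, not from the kernel tree: the CPU_STARTING/CPU_DYING
 * notifier above and the *_starting_cpu() callbacks elsewhere in this
 * collection are two generations of the same enable/disable pattern. On
 * kernels with the cpuhp state machine, the pair is registered roughly
 * as below, reusing arc_timer_starting_cpu() and arc_timer_irq from the
 * ARC snippets; the hotplug state constant and name string follow the
 * mainline ARC conversion but should be treated as illustrative here.
 */
static int arc_timer_dying_cpu(unsigned int cpu)
{
	/* Mirror of the starting callback: mask this CPU's copy again. */
	disable_percpu_irq(arc_timer_irq);
	return 0;
}

static int __init arc_timer_register_cpuhp(void)
{
	/* Runs the starting callback on each CPU as it comes online and
	 * the dying callback as each CPU goes down. */
	return cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
				 "clockevents/arc/timer:starting",
				 arc_timer_starting_cpu,
				 arc_timer_dying_cpu);
}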
static void __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	/* Let's make sure the timer is off before doing anything else */
	arch_timer_stop();

	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
	clk->name = "arch_sys_timer";
	clk->rating = 400;
	clk->set_mode = arch_timer_set_mode;
	clk->set_next_event = arch_timer_set_next_event;
	clk->irq = arch_timer_ppi;
	clk->cpumask = cpumask_of(smp_processor_id());

	clockevents_config_and_register(clk, arch_timer_rate,
					0xf, 0x7fffffff);

	enable_percpu_irq(clk->irq, 0);

	/* Ensure the physical counter is visible to userspace for the vDSO. */
	arch_counter_enable_user_access();
}
static void kvm_timer_init_interrupt(void *info)
{
	enable_percpu_irq(timer_irq.irq, 0);
}
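/*
 * Editor's note, not from the kernel tree: enable_percpu_irq() acts only
 * on the CPU it runs on, which is why helpers shaped like
 * kvm_timer_init_interrupt() above take the smp_call_function-style
 * (void *) argument. A sketch of how such a helper is typically fanned
 * out to every online CPU during init; kvm_timer_enable_all is a
 * hypothetical wrapper, on_each_cpu() is the real API.
 */
static int __init kvm_timer_enable_all(void)
{
	/* Run the per-CPU enable helper on every online CPU; wait=1
	 * makes the cross-CPU calls synchronous. */
	on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
	return 0;
}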