static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	if (arch_timer_use_virtual) {
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&arch_timer_cpu_nb);
	if (err)
		goto out_free_irq;

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(this_cpu_ptr(arch_timer_evt));

	return 0;

out_free_irq:
	if (arch_timer_use_virtual)
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
static int __init twd_local_timer_common_register(void) { int err; twd_evt = alloc_percpu(struct clock_event_device *); if (!twd_evt) { err = -ENOMEM; goto out_free; } err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt); if (err) { pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err); goto out_free; } err = local_timer_register(&twd_lt_ops); if (err) goto out_irq; #ifdef CONFIG_IPIPE_DEBUG_INTERNAL __ipipe_mach_hrtimer_debug = &twd_hrtimer_debug; #endif /* CONFIG_IPIPE_DEBUG_INTERNAL */ return 0; out_irq: free_percpu_irq(twd_ppi, twd_evt); out_free: iounmap(twd_base); twd_base = NULL; free_percpu(twd_evt); return err; }
static int __init twd_local_timer_common_register(void) { int err; twd_evt = alloc_percpu(struct clock_event_device *); if (!twd_evt) { err = -ENOMEM; goto out_free; } err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt); if (err) { pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err); goto out_free; } err = local_timer_register(&twd_lt_ops); if (err) goto out_irq; return 0; out_irq: free_percpu_irq(twd_ppi, twd_evt); out_free: iounmap(twd_base); twd_base = NULL; free_percpu(twd_evt); return err; }
void arc_request_percpu_irq(int irq, int cpu,
			    irqreturn_t (*isr)(int irq, void *dev),
			    const char *irq_nm,
			    void *percpu_dev)
{
	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		/*
		 * These 2 calls are essential to making percpu IRQ APIs work
		 * Ideally these details could be hidden in irq chip map function
		 * but the issue is IPIs IRQs being static (non-DT) and platform
		 * specific, so we can't identify them there.
		 */
		irq_set_percpu_devid(irq);
		irq_modify_status(irq, IRQ_NOAUTOEN, 0);  /* @irq, @clr, @set */

		rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
		if (rc)
			panic("Percpu IRQ request failed for %d\n", irq);
	}

	enable_percpu_irq(irq, 0);
}
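/*
 * Illustrative sketch (not taken from any driver in this collection; all
 * example_* names are hypothetical). The comment in arc_request_percpu_irq()
 * spells out the general per-cpu IRQ contract these examples rely on: mark
 * the descriptor with irq_set_percpu_devid(), request the handler once with
 * a __percpu dev_id, then have each CPU enable its own copy.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

static unsigned long __percpu *example_count;

static irqreturn_t example_percpu_handler(int irq, void *dev_id)
{
	/* genirq hands the handler this CPU's slot of the per-cpu dev_id */
	unsigned long *count = dev_id;

	(*count)++;
	return IRQ_HANDLED;
}

static int example_percpu_irq_setup(unsigned int irq)
{
	int err;

	example_count = alloc_percpu(unsigned long);
	if (!example_count)
		return -ENOMEM;

	/* Normally done by the irqchip map code, as the comment above notes */
	irq_set_percpu_devid(irq);

	/* One request covers all CPUs; the dev_id must be a __percpu pointer */
	err = request_percpu_irq(irq, example_percpu_handler, "example",
				 example_count);
	if (err) {
		free_percpu(example_count);
		return err;
	}

	/* Each CPU (e.g. from a hotplug callback) then enables its own copy */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}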
void local_timer_test_init(void)
{
	int err = 0;
	int i = 0;
	unsigned char name[10] = {'\0'};
	struct task_struct *thread[nr_cpu_ids];
	static struct clock_event_device *evt;

	err = request_gpt(GPT6, GPT_FREE_RUN, GPT_CLK_SRC_SYS, GPT_CLK_DIV_1,
			  0, NULL, 0);
	if (err) {
		printk(KERN_ERR "fail to request gpt, err=%d\n", err);
	}

	err = request_percpu_irq(GIC_PPI_PRIVATE_TIMER, test_handler,
				 "timer", &evt);
	if (err) {
		printk(KERN_ERR "can't register interrupt %d, err=%d\n",
		       GIC_PPI_PRIVATE_TIMER, err);
	}

	init_completion(&ack);

	for (i = 0; i < nr_cpu_ids; i++) {
		cpuid[i] = i;
		init_completion(&notify[i]);
		sprintf(name, "timer-%d", i);
		thread[i] = kthread_create(local_timer_test, &cpuid[i], name);
		if (IS_ERR(thread[i])) {
			err = PTR_ERR(thread[i]);
			thread[i] = NULL;
			printk(KERN_ERR "[%s]: kthread_create %s fail(%d)\n",
			       __func__, name, err);
			return;
		}
		kthread_bind(thread[i], i);
		wake_up_process(thread[i]);
	}
}
/*
 * clockevent setup for boot CPU
 */
static void __init arc_clockevent_setup(struct device_node *node)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int ret;

	register_cpu_notifier(&arc_timer_cpu_nb);

	arc_timer_irq = irq_of_parse_and_map(node, 0);
	if (arc_timer_irq <= 0)
		panic("clockevent: missing irq");

	ret = arc_get_timer_clk(node);
	if (ret)
		panic("clockevent: missing clk");

	evt->irq = arc_timer_irq;
	evt->cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX);

	/* Needs apriori irq_set_percpu_devid() done in intc map function */
	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
				 "Timer0 (per-cpu-tick)", evt);
	if (ret)
		panic("clockevent: unable to request irq\n");

	enable_percpu_irq(arc_timer_irq, 0);
}
/*
 * clockevent setup for boot CPU
 */
static int __init arc_clockevent_setup(struct device_node *node)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int ret;

	arc_timer_irq = irq_of_parse_and_map(node, 0);
	if (arc_timer_irq <= 0) {
		pr_err("clockevent: missing irq");
		return -EINVAL;
	}

	ret = arc_get_timer_clk(node);
	if (ret) {
		pr_err("clockevent: missing clk");
		return ret;
	}

	/* Needs apriori irq_set_percpu_devid() done in intc map function */
	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
				 "Timer0 (per-cpu-tick)", evt);
	if (ret) {
		pr_err("clockevent: unable to request irq\n");
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
				"AP_ARC_TIMER_STARTING",
				arc_timer_starting_cpu,
				arc_timer_dying_cpu);
	if (ret) {
		pr_err("Failed to setup hotplug state");
		return ret;
	}

	return 0;
}
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
		return 0;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
			       irq);
			return err;
		}
		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
			err = 0;
			irq = platform_get_irq(pmu_device, i);
			if (irq < 0)
				continue;

			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
					irq, i);
				continue;
			}

			err = request_irq(irq, handler,
					  IRQF_NOBALANCING | IRQF_NO_THREAD,
					  "arm-pmu",
					  per_cpu_ptr(&hw_events->percpu_pmu, i));
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
				       irq);
				return err;
			}

			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
		}
	}

	return 0;
}
static int __init csky_mptimer_init(struct device_node *np)
{
	int ret, cpu, cpu_rollback;
	struct timer_of *to = NULL;

	/*
	 * Csky_mptimer is designed for C-SKY SMP multi-processors and
	 * every core has its own private irq and regs for clkevt and
	 * clksrc.
	 *
	 * The regs are accessed by the cpu instructions mfcr/mtcr instead
	 * of an mmio map, so we don't need an mmio address in the dts, but
	 * we still need to supply the clk and irq number.
	 *
	 * We use a private irq for the mptimer and the irq number is the
	 * same for every core. So we use request_percpu_irq() in
	 * timer_of_init.
	 */
	csky_mptimer_irq = irq_of_parse_and_map(np, 0);
	if (csky_mptimer_irq <= 0)
		return -EINVAL;

	ret = request_percpu_irq(csky_mptimer_irq, csky_timer_interrupt,
				 "csky_mp_timer", &csky_to);
	if (ret)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		to = per_cpu_ptr(&csky_to, cpu);
		ret = timer_of_init(np, to);
		if (ret)
			goto rollback;
	}

	clocksource_register_hz(&csky_clocksource, timer_of_rate(to));
	sched_clock_register(sched_clock_read, 32, timer_of_rate(to));

	ret = cpuhp_setup_state(CPUHP_AP_CSKY_TIMER_STARTING,
				"clockevents/csky/timer:starting",
				csky_mptimer_starting_cpu,
				csky_mptimer_dying_cpu);
	if (ret)
		return -EINVAL;

	return 0;

rollback:
	for_each_possible_cpu(cpu_rollback) {
		if (cpu_rollback == cpu)
			break;

		to = per_cpu_ptr(&csky_to, cpu_rollback);
		timer_of_cleanup(to);
	}
	return -EINVAL;
}
int kvm_timer_hyp_init(void)
{
	struct device_node *np;
	unsigned int ppi;
	int err;

	timecounter = arch_timer_get_timecounter();
	if (!timecounter)
		return -ENODEV;

	np = of_find_matching_node(NULL, arch_timer_of_match);
	if (!np) {
		kvm_err("kvm_arch_timer: can't find DT node\n");
		return -ENODEV;
	}

	ppi = irq_of_parse_and_map(np, 2);
	if (!ppi) {
		kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
		err = -EINVAL;
		goto out;
	}

	err = request_percpu_irq(ppi, kvm_arch_timer_handler,
				 "kvm guest timer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
			ppi, err);
		goto out;
	}

	timer_irq.irq = ppi;

	err = register_cpu_notifier(&kvm_timer_cpu_nb);
	if (err) {
		kvm_err("Cannot register timer CPU notifier\n");
		goto out_free;
	}

	wqueue = create_singlethread_workqueue("kvm_arch_timer");
	if (!wqueue) {
		err = -ENOMEM;
		goto out_free;
	}

	kvm_info("%s IRQ%d\n", np->name, ppi);
	on_each_cpu(kvm_timer_init_interrupt, NULL, 1);

	goto out;

out_free:
	free_percpu_irq(ppi, kvm_get_running_vcpus());
out:
	of_node_put(np);
	return err;
}
/**
 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
 * according to the host GIC model. Accordingly calls either
 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be
 * instantiated by a guest later on.
 */
int kvm_vgic_hyp_init(void)
{
	const struct gic_kvm_info *gic_kvm_info;
	int ret;

	gic_kvm_info = gic_get_kvm_info();
	if (!gic_kvm_info)
		return -ENODEV;

	if (!gic_kvm_info->maint_irq) {
		kvm_err("No vgic maintenance irq\n");
		return -ENXIO;
	}

	switch (gic_kvm_info->type) {
	case GIC_V2:
		ret = vgic_v2_probe(gic_kvm_info);
		break;
	case GIC_V3:
		ret = vgic_v3_probe(gic_kvm_info);
		break;
	default:
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
	ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
				 vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n",
			kvm_vgic_global_state.maint_irq);
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
				"AP_KVM_ARM_VGIC_INIT_STARTING",
				vgic_init_cpu_starting, vgic_init_cpu_dying);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
	return 0;

out_free_irq:
	free_percpu_irq(kvm_vgic_global_state.maint_irq,
			kvm_get_running_vcpus());
	return ret;
}
static int __init xen_init_events(void)
{
	if (!xen_domain() || xen_events_irq < 0)
		return -ENODEV;

	xen_init_IRQ();

	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
			       "events", &xen_vcpu)) {
		pr_err("Error requesting IRQ %d\n", xen_events_irq);
		return -EINVAL;
	}

	on_each_cpu(xen_percpu_init, NULL, 0);

	return 0;
}
static int __init twd_local_timer_common_register(struct device_node *np)
{
	int err;

	twd_evt = alloc_percpu(struct clock_event_device);
	if (!twd_evt) {
		err = -ENOMEM;
		goto out_free;
	}

	err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
	if (err) {
		pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&twd_timer_cpu_nb);
	if (err)
		goto out_irq;

#ifndef CONFIG_ARCH_CNS3XXX
	twd_get_clock(np);
#endif

	/*
	 * Immediately configure the timer on the boot CPU, unless we need
	 * jiffies to be incrementing to calibrate the rate in which case
	 * setup the timer in late_time_init.
	 */
	if (twd_timer_rate)
		twd_timer_setup();
	else
		late_time_init = twd_timer_setup;

	return 0;

out_irq:
	free_percpu_irq(twd_ppi, twd_evt);
out_free:
	iounmap(twd_base);
	twd_base = NULL;
	free_percpu(twd_evt);

	return err;
}
static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
				 bool percpu)
{
	struct clocksource *cs = &msm_clocksource;
	int res = 0;

	msm_timer_irq = irq;
	msm_timer_has_ppi = percpu;

	msm_evt = alloc_percpu(struct clock_event_device);
	if (!msm_evt) {
		pr_err("memory allocation failed for clockevents\n");
		goto err;
	}

	if (percpu)
		res = request_percpu_irq(irq, msm_timer_interrupt,
					 "gp_timer", msm_evt);

	if (res) {
		pr_err("request_percpu_irq failed\n");
	} else {
		/* Install and invoke hotplug callbacks */
		res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
					"AP_QCOM_TIMER_STARTING",
					msm_local_timer_starting_cpu,
					msm_local_timer_dying_cpu);
		if (res) {
			free_percpu_irq(irq, msm_evt);
			goto err;
		}
	}

err:
	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
	res = clocksource_register_hz(cs, dgt_hz);
	if (res)
		pr_err("clocksource_register failed\n");
	sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
	msm_delay_timer.freq = dgt_hz;
	register_current_timer_delay(&msm_delay_timer);

	return res;
}
/*
 * 20140920
 * Register twd as a percpu irq and register it as the local timer.
 */
static int __init twd_local_timer_common_register(void)
{
	int err;

	/*
	 * 20140913
	 * Allocate a percpu variable for the clock_event_device.
	 */
	twd_evt = alloc_percpu(struct clock_event_device *);
	if (!twd_evt) {
		err = -ENOMEM;
		goto out_free;
	}

	/*
	 * 20140920
	 * Register twd_ppi (IRQ_LOCALTIMER, 29) as a percpu irq.
	 * The handler is twd_handler and the dev_id is twd_evt.
	 */
	err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
	if (err) {
		pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
		goto out_free;
	}

	/*
	 * 20140920
	 * Install twd_lt_ops as the local timer operations (lt_ops).
	 */
	err = local_timer_register(&twd_lt_ops);
	if (err)
		goto out_irq;

	return 0;

out_irq:
	free_percpu_irq(twd_ppi, twd_evt);
out_free:
	iounmap(twd_base);
	twd_base = NULL;
	free_percpu(twd_evt);

	return err;
}
static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
				  bool percpu)
{
	struct clocksource *cs = &msm_clocksource;
	int res = 0;

	msm_timer_irq = irq;
	msm_timer_has_ppi = percpu;

	msm_evt = alloc_percpu(struct clock_event_device);
	if (!msm_evt) {
		pr_err("memory allocation failed for clockevents\n");
		goto err;
	}

	if (percpu)
		res = request_percpu_irq(irq, msm_timer_interrupt,
					 "gp_timer", msm_evt);

	if (res) {
		pr_err("request_percpu_irq failed\n");
	} else {
		res = register_cpu_notifier(&msm_timer_cpu_nb);
		if (res) {
			free_percpu_irq(irq, msm_evt);
			goto err;
		}

		/* Immediately configure the timer on the boot CPU */
		msm_local_timer_setup(raw_cpu_ptr(msm_evt));
	}

err:
	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
	res = clocksource_register_hz(cs, dgt_hz);
	if (res)
		pr_err("clocksource_register failed\n");
	sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
	msm_delay_timer.freq = dgt_hz;
	register_current_timer_delay(&msm_delay_timer);
}
static void __init exynos4_timer_resources(void)
{
	struct clk *mct_clk;

	mct_clk = clk_get(NULL, "xtal");
	clk_rate = clk_get_rate(mct_clk);

#ifdef CONFIG_LOCAL_TIMERS
	if (mct_int_type == MCT_INT_PPI) {
		int err;

		err = request_percpu_irq(IRQ_MCT_LOCALTIMER,
					 exynos4_mct_tick_isr, "MCT",
					 &percpu_mct_tick);
		WARN(err, "MCT: can't request IRQ %d (%d)\n",
		     IRQ_MCT_LOCALTIMER, err);
	}

	local_timer_register(&exynos4_mct_tick_ops);
#endif /* CONFIG_LOCAL_TIMERS */
}
/*
 * Setup the local clock events for a CPU.
 */
void __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
	struct clock_event_device **this_cpu_clk;

	if (!twd_evt) {
		int err;

		twd_evt = alloc_percpu(struct clock_event_device *);
		if (!twd_evt) {
			pr_err("twd: can't allocate memory\n");
			return;
		}

		err = request_percpu_irq(clk->irq, twd_handler,
					 "twd", twd_evt);
		if (err) {
			pr_err("twd: can't register interrupt %d (%d)\n",
			       clk->irq, err);
			return;
		}
	}
int __init arm_generic_timer_init(void)
{
	struct device_node *np;
	int err;
	u32 freq;

	np = of_find_matching_node(NULL, arch_timer_of_match);
	if (!np) {
		pr_err("arch_timer: can't find DT node\n");
		return -ENODEV;
	}

	/* Try to determine the frequency from the device tree or CNTFRQ */
	if (!of_property_read_u32(np, "clock-frequency", &freq))
		arch_timer_rate = freq;
	arch_timer_calibrate();

	arch_timer_ppi = irq_of_parse_and_map(np, 0);
	pr_info("arch_timer: found %s irq %d\n", np->name, arch_timer_ppi);

	err = request_percpu_irq(arch_timer_ppi, arch_timer_handle_irq,
				 np->name, &arch_timer_evt);
	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       arch_timer_ppi, err);
		return err;
	}

	clocksource_register_hz(&clocksource_counter, arch_timer_rate);

	/* Calibrate the delay loop directly */
	lpj_fine = arch_timer_rate / HZ;

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(per_cpu_ptr(&arch_timer_evt, smp_processor_id()));

	register_cpu_notifier(&arch_timer_cpu_nb);

	return 0;
}
static int __init nps_setup_clockevent(struct device_node *node)
{
	struct clk *clk;
	int ret;

	nps_timer0_irq = irq_of_parse_and_map(node, 0);
	if (nps_timer0_irq <= 0) {
		pr_err("clockevent: missing irq");
		return -EINVAL;
	}

	ret = nps_get_timer_clk(node, &nps_timer0_freq, &clk);
	if (ret)
		return ret;

	/* Needs apriori irq_set_percpu_devid() done in intc map function */
	ret = request_percpu_irq(nps_timer0_irq, timer_irq_handler,
				 "Timer0 (per-cpu-tick)",
				 &nps_clockevent_device);
	if (ret) {
		pr_err("Couldn't request irq\n");
		clk_disable_unprepare(clk);
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
				"clockevents/nps:starting",
				nps_timer_starting_cpu,
				nps_timer_dying_cpu);
	if (ret) {
		pr_err("Failed to setup hotplug state");
		clk_disable_unprepare(clk);
		free_percpu_irq(nps_timer0_irq, &nps_clockevent_device);
		return ret;
	}

	return 0;
}
/**
 * timer_of_irq_init - Request the interrupt
 * @np: a device tree node pointer
 * @of_irq: an of_timer_irq structure pointer
 *
 * Get the interrupt number from the DT from its definition and
 * request it. The interrupt is gotten by falling back the following way:
 *
 * - Get interrupt number by name
 * - Get interrupt number by index
 *
 * When the interrupt is per CPU, 'request_percpu_irq()' is called,
 * otherwise 'request_irq()' is used.
 *
 * Returns 0 on success, < 0 otherwise
 */
static __init int timer_of_irq_init(struct device_node *np,
				    struct of_timer_irq *of_irq)
{
	int ret;
	struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
	struct clock_event_device *clkevt = &to->clkevt;

	if (of_irq->name) {
		of_irq->irq = ret = of_irq_get_byname(np, of_irq->name);
		if (ret < 0) {
			pr_err("Failed to get interrupt %s for %s\n",
			       of_irq->name, np->full_name);
			return ret;
		}
	} else {
		of_irq->irq = irq_of_parse_and_map(np, of_irq->index);
	}
	if (!of_irq->irq) {
		pr_err("Failed to map interrupt for %pOF\n", np);
		return -EINVAL;
	}

	ret = of_irq->percpu ?
		request_percpu_irq(of_irq->irq, of_irq->handler,
				   np->full_name, clkevt) :
		request_irq(of_irq->irq, of_irq->handler,
			    of_irq->flags ? of_irq->flags : IRQF_TIMER,
			    np->full_name, clkevt);
	if (ret) {
		pr_err("Failed to request irq %d for %pOF\n", of_irq->irq, np);
		return ret;
	}

	clkevt->irq = of_irq->irq;

	return 0;
}
int __init generic_timer_register(void)
{
	int err;

	if (timer_evt)
		return -EBUSY;

	timer_ppi = GIC_PPI_PRIVATE_TIMER;

	timer_evt = alloc_percpu(struct clock_event_device *);
	if (!timer_evt) {
		err = -ENOMEM;
		goto out_exit;
	}

	err = request_percpu_irq(timer_ppi, timer_handler, "timer", timer_evt);
	if (err) {
		pr_err("generic timer: can't register interrupt %d (%d)\n",
		       timer_ppi, err);
		goto out_free;
	}

	err = local_timer_register(&generic_timer_ops);
	if (err)
		goto out_irq;

	return 0;

out_irq:
	free_percpu_irq(timer_ppi, timer_evt);
out_free:
	free_percpu(timer_evt);
	timer_evt = NULL;
out_exit:
	return err;
}
static void irq_wdt_init(void)
{
	int err = 0;

	wdt_evt = alloc_percpu(int *);
	if (!wdt_evt) {
		err = -ENOMEM;
		printk("fwq ?????\n");
		goto out_free;
	}

	err = request_percpu_irq(GIC_PPI_WATCHDOG_TIMER, wdt_handler,
				 "local_wdt", wdt_evt);
	if (err) {
		printk(KERN_ERR "fwq Fail to request IRQ for local WDT\n");
	} else {
		printk("fwq user IRQ\n");
	}

	return;

out_free:
	printk("fwq error \n");
	free_percpu(wdt_evt);
	return;
}
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case PHYS_SECURE_PPI:
	case PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"AP_ARM_ARCH_TIMER_STARTING",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;

	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
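/*
 * Illustrative sketch (example_* names are hypothetical, not from the
 * drivers in this collection) of the starting/dying callbacks that the
 * cpuhp_setup_state()-based snippets above rely on but do not show: each
 * CPU enables its copy of the percpu IRQ as it comes online and disables
 * it again as it goes offline. The real drivers register a dedicated
 * CPUHP_AP_*_STARTING state and also program the local clockevent in the
 * starting callback.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>

static unsigned int example_timer_irq;	/* parsed from DT during probe */

/* Called on each CPU as it comes online, including the boot CPU */
static int example_timer_starting_cpu(unsigned int cpu)
{
	enable_percpu_irq(example_timer_irq, IRQ_TYPE_NONE);
	return 0;
}

/* Called on each CPU as it goes offline */
static int example_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(example_timer_irq);
	return 0;
}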
void __init armada_370_xp_timer_init(void)
{
	u32 u;
	struct device_node *np;
	int res;

	np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
	timer_base = of_iomap(np, 0);
	WARN_ON(!timer_base);
	local_base = of_iomap(np, 1);

	if (of_find_property(np, "marvell,timer-25Mhz", NULL)) {
		/* The fixed 25MHz timer is available so let's use it */
		u = readl(timer_base + TIMER_CTRL_OFF);
		writel(u | TIMER0_25MHZ, timer_base + TIMER_CTRL_OFF);
		timer_clk = 25000000;
	} else {
		unsigned long rate = 0;
		struct clk *clk = of_clk_get(np, 0);

		WARN_ON(IS_ERR(clk));
		rate = clk_get_rate(clk);
		u = readl(timer_base + TIMER_CTRL_OFF);
		writel(u & ~(TIMER0_25MHZ), timer_base + TIMER_CTRL_OFF);

		timer_clk = rate / TIMER_DIVIDER;
		timer25Mhz = false;
	}

	/*
	 * We use timer 0 as clocksource, and private(local) timer 0
	 * for clockevents
	 */
	armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);

	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;

	/*
	 * Set scale and timer for sched_clock.
	 */
	setup_sched_clock(armada_370_xp_read_sched_clock, 32, timer_clk);

	/*
	 * Setup free-running clocksource timer (interrupts
	 * disabled).
	 */
	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);

	u = readl(timer_base + TIMER_CTRL_OFF);

	writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
		TIMER0_DIV(TIMER_DIVIDER_SHIFT)),
	       timer_base + TIMER_CTRL_OFF);

	clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
			      "armada_370_xp_clocksource",
			      timer_clk, 300, 32, clocksource_mmio_readl_down);

	register_cpu_notifier(&armada_370_xp_timer_cpu_nb);

	armada_370_xp_evt = alloc_percpu(struct clock_event_device);

	/*
	 * Setup clockevent timer (interrupt-driven).
	 */
	res = request_percpu_irq(armada_370_xp_clkevt_irq,
				 armada_370_xp_timer_interrupt,
				 "armada_370_xp_per_cpu_tick",
				 armada_370_xp_evt);

	/* Immediately configure the timer on the boot CPU */
	if (!res)
		armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
}
static int __init armada_370_xp_timer_common_init(struct device_node *np)
{
	u32 clr = 0, set = 0;
	int res;

	timer_base = of_iomap(np, 0);
	if (!timer_base) {
		pr_err("Failed to iomap");
		return -ENXIO;
	}

	local_base = of_iomap(np, 1);
	if (!local_base) {
		pr_err("Failed to iomap");
		return -ENXIO;
	}

	if (timer25Mhz) {
		set = TIMER0_25MHZ;
		enable_mask = TIMER0_EN;
	} else {
		clr = TIMER0_25MHZ;
		enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
	}
	atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);
	local_timer_ctrl_clrset(clr, set);

	/*
	 * We use timer 0 as clocksource, and private(local) timer 0
	 * for clockevents
	 */
	armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);

	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;

	/*
	 * Setup free-running clocksource timer (interrupts
	 * disabled).
	 */
	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);

	atomic_io_modify(timer_base + TIMER_CTRL_OFF,
			 TIMER0_RELOAD_EN | enable_mask,
			 TIMER0_RELOAD_EN | enable_mask);

	armada_370_delay_timer.freq = timer_clk;
	register_current_timer_delay(&armada_370_delay_timer);

	/*
	 * Set scale and timer for sched_clock.
	 */
	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);

	res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
				    "armada_370_xp_clocksource",
				    timer_clk, 300, 32,
				    clocksource_mmio_readl_down);
	if (res) {
		pr_err("Failed to initialize clocksource mmio");
		return res;
	}

	armada_370_xp_evt = alloc_percpu(struct clock_event_device);
	if (!armada_370_xp_evt)
		return -ENOMEM;

	/*
	 * Setup clockevent timer (interrupt-driven).
	 */
	res = request_percpu_irq(armada_370_xp_clkevt_irq,
				 armada_370_xp_timer_interrupt,
				 "armada_370_xp_per_cpu_tick",
				 armada_370_xp_evt);
	if (res) {
		pr_err("Failed to request percpu irq");
		return res;
	}

	res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
				"clockevents/armada:starting",
				armada_370_xp_timer_starting_cpu,
				armada_370_xp_timer_dying_cpu);
	if (res) {
		pr_err("Failed to setup hotplug state and timer");
		return res;
	}

	register_syscore_ops(&armada_370_xp_timer_syscore_ops);

	return 0;
}
static void __init global_timer_of_register(struct device_node *np)
{
	struct clk *gt_clk;
	int err = 0;

	/*
	 * In r2p0 the comparators for each processor with the global timer
	 * fire when the timer value is greater than or equal to. In previous
	 * revisions the comparators fired when the timer value was equal to.
	 */
	if ((read_cpuid_id() & 0xf0000f) < 0x200000) {
		pr_warn("global-timer: non support for this cpu version.\n");
		return;
	}

	gt_ppi = irq_of_parse_and_map(np, 0);
	if (!gt_ppi) {
		pr_warn("global-timer: unable to parse irq\n");
		return;
	}

	gt_base = of_iomap(np, 0);
	if (!gt_base) {
		pr_warn("global-timer: invalid base address\n");
		return;
	}

	gt_clk = of_clk_get(np, 0);
	if (!IS_ERR(gt_clk)) {
		err = clk_prepare_enable(gt_clk);
		if (err)
			goto out_unmap;
	} else {
		pr_warn("global-timer: clk not found\n");
		err = -EINVAL;
		goto out_unmap;
	}

	gt_clk_rate = clk_get_rate(gt_clk);
	gt_evt = alloc_percpu(struct clock_event_device);
	if (!gt_evt) {
		pr_warn("global-timer: can't allocate memory\n");
		err = -ENOMEM;
		goto out_clk;
	}

	clk_rate_change_nb.notifier_call = arm_global_timer_clockevent_cb;
	clk_rate_change_nb.next = NULL;
	if (clk_notifier_register(gt_clk, &clk_rate_change_nb))
		pr_warn("Unable to register clock notifier.\n");

	err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
				 "gt", gt_evt);
	if (err) {
		pr_warn("global-timer: can't register interrupt %d (%d)\n",
			gt_ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&gt_cpu_nb);
	if (err) {
		pr_warn("global-timer: unable to register cpu notifier.\n");
		goto out_irq;
	}

	/* Immediately configure the timer on the boot CPU */
	gt_clocksource_init();
	gt_clockevents_init(this_cpu_ptr(gt_evt));

	return;

out_irq:
	free_percpu_irq(gt_ppi, gt_evt);
out_free:
	free_percpu(gt_evt);
out_clk:
	clk_disable_unprepare(gt_clk);
out_unmap:
	iounmap(gt_base);
	WARN(err, "ARM Global timer register failed (%d)\n", err);
}
static void __init armada_370_xp_timer_common_init(struct device_node *np)
{
	u32 clr = 0, set = 0;
	int res;

	timer_base = of_iomap(np, 0);
	WARN_ON(!timer_base);
	local_base = of_iomap(np, 1);

	if (timer25Mhz) {
		set = TIMER0_25MHZ;
		enable_mask = TIMER0_EN;
	} else {
		clr = TIMER0_25MHZ;
		enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
	}
	atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);
	local_timer_ctrl_clrset(clr, set);

	/*
	 * We use timer 0 as clocksource, and private(local) timer 0
	 * for clockevents
	 */
	armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);

	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;

	/*
	 * Setup free-running clocksource timer (interrupts
	 * disabled).
	 */
	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);

	atomic_io_modify(timer_base + TIMER_CTRL_OFF,
			 TIMER0_RELOAD_EN | enable_mask,
			 TIMER0_RELOAD_EN | enable_mask);

	/*
	 * Set scale and timer for sched_clock.
	 */
	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);

	clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
			      "armada_370_xp_clocksource",
			      timer_clk, 300, 32, clocksource_mmio_readl_down);

	register_cpu_notifier(&armada_370_xp_timer_cpu_nb);

	armada_370_xp_evt = alloc_percpu(struct clock_event_device);

	/*
	 * Setup clockevent timer (interrupt-driven).
	 */
	res = request_percpu_irq(armada_370_xp_clkevt_irq,
				 armada_370_xp_timer_interrupt,
				 "armada_370_xp_per_cpu_tick",
				 armada_370_xp_evt);

	/* Immediately configure the timer on the boot CPU */
	if (!res)
		armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));

	register_syscore_ops(&armada_370_xp_timer_syscore_ops);
}
static int __init xen_guest_init(void)
{
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page = 0;
	struct device_node *node;
	int len;
	const char *s = NULL;
	const char *version = NULL;
	const char *xen_prefix = "xen,xen-";
	struct resource res;
	phys_addr_t grant_frames;

	node = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!node) {
		pr_debug("No Xen support\n");
		return 0;
	}
	s = of_get_property(node, "compatible", &len);
	if (strlen(xen_prefix) + 3 < len &&
	    !strncmp(xen_prefix, s, strlen(xen_prefix)))
		version = s + strlen(xen_prefix);
	if (version == NULL) {
		pr_debug("Xen version not found\n");
		return 0;
	}
	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
		return 0;
	grant_frames = res.start;
	xen_events_irq = irq_of_parse_and_map(node, 0);
	pr_info("Xen %s support found, events_irq=%d gnttab_frame=%pa\n",
		version, xen_events_irq, &grant_frames);

	if (xen_events_irq < 0)
		return -ENODEV;

	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();

	if (xen_feature(XENFEAT_dom0))
		xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
	else
		xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			get_zeroed_page(GFP_KERNEL);
	if (!shared_info_page) {
		pr_err("not enough memory\n");
		return -ENOMEM;
	}
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;

	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page, we use it in the event channel upcall and in some pvclock
	 * related functions.
	 * The shared info contains exactly 1 CPU (the boot CPU). The guest
	 * is required to use VCPUOP_register_vcpu_info to place vcpu info
	 * for secondary CPUs as they are brought up.
	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
	 */
	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
				       sizeof(struct vcpu_info));
	if (xen_vcpu_info == NULL)
		return -ENOMEM;

	if (gnttab_setup_auto_xlat_frames(grant_frames)) {
		free_percpu(xen_vcpu_info);
		return -ENOMEM;
	}
	gnttab_init();
	if (!xen_initial_domain())
		xenbus_probe(NULL);

	/*
	 * Making sure board specific code will not set up ops for
	 * cpu idle and cpu freq.
	 */
	disable_cpuidle();
	disable_cpufreq();

	xen_init_IRQ();

	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
			       "events", &xen_vcpu)) {
		pr_err("Error request IRQ %d\n", xen_events_irq);
		return -EINVAL;
	}

	xen_percpu_init();

	register_cpu_notifier(&xen_cpu_notifier);

	return 0;
}