static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	if (arch_timer_use_virtual) {
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&arch_timer_cpu_nb);
	if (err)
		goto out_free_irq;

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(this_cpu_ptr(arch_timer_evt));

	return 0;

out_free_irq:
	if (arch_timer_use_virtual)
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &hw_events->percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
		}
	}
}
static int __init twd_local_timer_common_register(void) { int err; twd_evt = alloc_percpu(struct clock_event_device *); if (!twd_evt) { err = -ENOMEM; goto out_free; } err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt); if (err) { pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err); goto out_free; } err = local_timer_register(&twd_lt_ops); if (err) goto out_irq; return 0; out_irq: free_percpu_irq(twd_ppi, twd_evt); out_free: iounmap(twd_base); twd_base = NULL; free_percpu(twd_evt); return err; }
static int __init twd_local_timer_common_register(void) { int err; twd_evt = alloc_percpu(struct clock_event_device *); if (!twd_evt) { err = -ENOMEM; goto out_free; } err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt); if (err) { pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err); goto out_free; } err = local_timer_register(&twd_lt_ops); if (err) goto out_irq; #ifdef CONFIG_IPIPE_DEBUG_INTERNAL __ipipe_mach_hrtimer_debug = &twd_hrtimer_debug; #endif /* CONFIG_IPIPE_DEBUG_INTERNAL */ return 0; out_irq: free_percpu_irq(twd_ppi, twd_evt); out_free: iounmap(twd_base); twd_base = NULL; free_percpu(twd_evt); return err; }
/**
 * timer_of_irq_exit - Release the interrupt
 * @of_irq: an of_timer_irq structure pointer
 *
 * Free the irq resource
 */
static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
{
	struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);

	struct clock_event_device *clkevt = &to->clkevt;

	of_irq->percpu ? free_percpu_irq(of_irq->irq, clkevt) :
		free_irq(of_irq->irq, clkevt);
}
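/*
 * Illustrative sketch only (not taken from the source above): the
 * request-side mirror of timer_of_irq_exit(), showing why the exit path
 * chooses free_percpu_irq() vs free_irq() based on of_irq->percpu. The
 * function name and the handler/flags/name parameters are hypothetical;
 * only of_irq->irq, of_irq->percpu and to->clkevt come from the snippet
 * above.
 */
static __init int timer_of_irq_request_sketch(struct of_timer_irq *of_irq,
					      irq_handler_t handler,
					      unsigned long flags,
					      const char *name)
{
	struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
	struct clock_event_device *clkevt = &to->clkevt;

	/*
	 * A per-CPU interrupt must be requested with request_percpu_irq()
	 * and later released with free_percpu_irq(); a regular interrupt
	 * uses the request_irq()/free_irq() pair with the same dev_id.
	 */
	return of_irq->percpu ?
		request_percpu_irq(of_irq->irq, handler, name, clkevt) :
		request_irq(of_irq->irq, handler, flags, name, clkevt);
}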
int kvm_timer_hyp_init(void)
{
	struct device_node *np;
	unsigned int ppi;
	int err;

	timecounter = arch_timer_get_timecounter();
	if (!timecounter)
		return -ENODEV;

	np = of_find_matching_node(NULL, arch_timer_of_match);
	if (!np) {
		kvm_err("kvm_arch_timer: can't find DT node\n");
		return -ENODEV;
	}

	ppi = irq_of_parse_and_map(np, 2);
	if (!ppi) {
		kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
		err = -EINVAL;
		goto out;
	}

	err = request_percpu_irq(ppi, kvm_arch_timer_handler,
				 "kvm guest timer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
			ppi, err);
		goto out;
	}

	timer_irq.irq = ppi;

	err = register_cpu_notifier(&kvm_timer_cpu_nb);
	if (err) {
		kvm_err("Cannot register timer CPU notifier\n");
		goto out_free;
	}

	wqueue = create_singlethread_workqueue("kvm_arch_timer");
	if (!wqueue) {
		err = -ENOMEM;
		goto out_free;
	}

	kvm_info("%s IRQ%d\n", np->name, ppi);
	on_each_cpu(kvm_timer_init_interrupt, NULL, 1);

	goto out;
out_free:
	free_percpu_irq(ppi, kvm_get_running_vcpus());
out:
	of_node_put(np);
	return err;
}
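/*
 * Illustrative sketch only (not taken from the source above): the shape of
 * a handler registered with request_percpu_irq(), mirroring the
 * "kvm guest timer" registration above that passes kvm_get_running_vcpus()
 * (a __percpu array of vcpu pointers) as dev_id. For a per-CPU interrupt
 * the core hands the handler this CPU's slot of that __percpu dev_id, so
 * no this_cpu_ptr() is needed inside the handler. The handler name and
 * body are assumptions.
 */
static irqreturn_t kvm_arch_timer_handler_sketch(int irq, void *dev_id)
{
	/* dev_id already points at this CPU's kvm_vcpu * slot */
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;

	if (!vcpu)
		return IRQ_NONE;	/* no guest currently running here */

	/* ... mask the timer and forward the interrupt to the guest ... */
	return IRQ_HANDLED;
}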
/**
 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
 * according to the host GIC model. Accordingly calls either
 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be
 * instantiated by a guest later on.
 */
int kvm_vgic_hyp_init(void)
{
	const struct gic_kvm_info *gic_kvm_info;
	int ret;

	gic_kvm_info = gic_get_kvm_info();
	if (!gic_kvm_info)
		return -ENODEV;

	if (!gic_kvm_info->maint_irq) {
		kvm_err("No vgic maintenance irq\n");
		return -ENXIO;
	}

	switch (gic_kvm_info->type) {
	case GIC_V2:
		ret = vgic_v2_probe(gic_kvm_info);
		break;
	case GIC_V3:
		ret = vgic_v3_probe(gic_kvm_info);
		break;
	default:
		ret = -ENODEV;
	}

	if (ret)
		return ret;

	kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
	ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
				 vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n",
			kvm_vgic_global_state.maint_irq);
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
				"AP_KVM_ARM_VGIC_INIT_STARTING",
				vgic_init_cpu_starting,
				vgic_init_cpu_dying);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
	return 0;

out_free_irq:
	free_percpu_irq(kvm_vgic_global_state.maint_irq,
			kvm_get_running_vcpus());
	return ret;
}
static int __init twd_local_timer_common_register(struct device_node *np)
{
	int err;

	twd_evt = alloc_percpu(struct clock_event_device);
	if (!twd_evt) {
		err = -ENOMEM;
		goto out_free;
	}

	err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
	if (err) {
		pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&twd_timer_cpu_nb);
	if (err)
		goto out_irq;

#ifndef CONFIG_ARCH_CNS3XXX
	twd_get_clock(np);
#endif

	/*
	 * Immediately configure the timer on the boot CPU, unless we need
	 * jiffies to be incrementing to calibrate the rate in which case
	 * setup the timer in late_time_init.
	 */
	if (twd_timer_rate)
		twd_timer_setup();
	else
		late_time_init = twd_timer_setup;

	return 0;

out_irq:
	free_percpu_irq(twd_ppi, twd_evt);
out_free:
	iounmap(twd_base);
	twd_base = NULL;
	free_percpu(twd_evt);

	return err;
}
static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
				 bool percpu)
{
	struct clocksource *cs = &msm_clocksource;
	int res = 0;

	msm_timer_irq = irq;
	msm_timer_has_ppi = percpu;

	msm_evt = alloc_percpu(struct clock_event_device);
	if (!msm_evt) {
		pr_err("memory allocation failed for clockevents\n");
		goto err;
	}

	if (percpu)
		res = request_percpu_irq(irq, msm_timer_interrupt,
					 "gp_timer", msm_evt);

	if (res) {
		pr_err("request_percpu_irq failed\n");
	} else {
		/* Install and invoke hotplug callbacks */
		res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
					"AP_QCOM_TIMER_STARTING",
					msm_local_timer_starting_cpu,
					msm_local_timer_dying_cpu);
		if (res) {
			free_percpu_irq(irq, msm_evt);
			goto err;
		}
	}

err:
	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
	res = clocksource_register_hz(cs, dgt_hz);
	if (res)
		pr_err("clocksource_register failed\n");
	sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
	msm_delay_timer.freq = dgt_hz;
	register_current_timer_delay(&msm_delay_timer);

	return res;
}
/*
 * Register the TWD as a per-CPU IRQ and register it as the local timer.
 */
static int __init twd_local_timer_common_register(void)
{
	int err;

	/* Allocate the per-CPU clock_event_device pointer. */
	twd_evt = alloc_percpu(struct clock_event_device *);
	if (!twd_evt) {
		err = -ENOMEM;
		goto out_free;
	}

	/*
	 * Register twd_ppi (IRQ_LOCALTIMER, 29) as a per-CPU IRQ, with
	 * twd_handler as the handler and twd_evt as the dev_id.
	 */
	err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
	if (err) {
		pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
		goto out_free;
	}

	/* Install twd_lt_ops as the local timer operations (lt_ops). */
	err = local_timer_register(&twd_lt_ops);
	if (err)
		goto out_irq;

	return 0;

out_irq:
	free_percpu_irq(twd_ppi, twd_evt);
out_free:
	iounmap(twd_base);
	twd_base = NULL;
	free_percpu(twd_evt);

	return err;
}
static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
				  bool percpu)
{
	struct clocksource *cs = &msm_clocksource;
	int res = 0;

	msm_timer_irq = irq;
	msm_timer_has_ppi = percpu;

	msm_evt = alloc_percpu(struct clock_event_device);
	if (!msm_evt) {
		pr_err("memory allocation failed for clockevents\n");
		goto err;
	}

	if (percpu)
		res = request_percpu_irq(irq, msm_timer_interrupt,
					 "gp_timer", msm_evt);

	if (res) {
		pr_err("request_percpu_irq failed\n");
	} else {
		res = register_cpu_notifier(&msm_timer_cpu_nb);
		if (res) {
			free_percpu_irq(irq, msm_evt);
			goto err;
		}

		/* Immediately configure the timer on the boot CPU */
		msm_local_timer_setup(raw_cpu_ptr(msm_evt));
	}

err:
	writel_relaxed(TIMER_ENABLE_EN, source_base + TIMER_ENABLE);
	res = clocksource_register_hz(cs, dgt_hz);
	if (res)
		pr_err("clocksource_register failed\n");
	sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
	msm_delay_timer.freq = dgt_hz;
	register_current_timer_delay(&msm_delay_timer);
}
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, cpu_pmu);
		}
	}
}
static int __init nps_setup_clockevent(struct device_node *node)
{
	struct clk *clk;
	int ret;

	nps_timer0_irq = irq_of_parse_and_map(node, 0);
	if (nps_timer0_irq <= 0) {
		pr_err("clockevent: missing irq");
		return -EINVAL;
	}

	ret = nps_get_timer_clk(node, &nps_timer0_freq, &clk);
	if (ret)
		return ret;

	/* Needs apriori irq_set_percpu_devid() done in intc map function */
	ret = request_percpu_irq(nps_timer0_irq, timer_irq_handler,
				 "Timer0 (per-cpu-tick)",
				 &nps_clockevent_device);
	if (ret) {
		pr_err("Couldn't request irq\n");
		clk_disable_unprepare(clk);
		return ret;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
				"clockevents/nps:starting",
				nps_timer_starting_cpu,
				nps_timer_dying_cpu);
	if (ret) {
		pr_err("Failed to setup hotplug state");
		clk_disable_unprepare(clk);
		free_percpu_irq(nps_timer0_irq, &nps_clockevent_device);
		return ret;
	}

	return 0;
}
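/*
 * Illustrative sketch only (not the NPS intc driver itself): what the
 * "apriori irq_set_percpu_devid()" noted in the comment above typically
 * looks like in an irqchip's irq_domain map callback. Marking the virq as
 * a per-CPU-devid interrupt and installing handle_percpu_devid_irq is
 * what makes the later request_percpu_irq() call legal. The function and
 * chip names are hypothetical.
 */
static struct irq_chip sketch_irq_chip = {
	.name = "sketch-intc",		/* hypothetical chip */
};

static int sketch_intc_domain_map(struct irq_domain *d, unsigned int virq,
				  irq_hw_number_t hw)
{
	/* per-CPU timer line: each CPU gets its own dev_id slot */
	irq_set_percpu_devid(virq);
	irq_set_chip_and_handler(virq, &sketch_irq_chip,
				 handle_percpu_devid_irq);
	return 0;
}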
int __init generic_timer_register(void)
{
	int err;

	if (timer_evt)
		return -EBUSY;

	timer_ppi = GIC_PPI_PRIVATE_TIMER;
	timer_evt = alloc_percpu(struct clock_event_device *);
	if (!timer_evt) {
		err = -ENOMEM;
		goto out_exit;
	}

	err = request_percpu_irq(timer_ppi, timer_handler, "timer", timer_evt);
	if (err) {
		pr_err("generic timer: can't register interrupt %d (%d)\n",
		       timer_ppi, err);
		goto out_free;
	}

	err = local_timer_register(&generic_timer_ops);
	if (err)
		goto out_irq;

	return 0;

out_irq:
	free_percpu_irq(timer_ppi, timer_evt);
out_free:
	free_percpu(timer_evt);
	timer_evt = NULL;
out_exit:
	return err;
}
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case PHYS_SECURE_PPI:
	case PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"AP_ARM_ARCH_TIMER_STARTING",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
static void __init global_timer_of_register(struct device_node *np)
{
	struct clk *gt_clk;
	int err = 0;

	/*
	 * In r2p0 the comparators for each processor with the global timer
	 * fire when the timer value is greater than or equal to. In previous
	 * revisions the comparators fired when the timer value was equal to.
	 */
	if ((read_cpuid_id() & 0xf0000f) < 0x200000) {
		pr_warn("global-timer: non support for this cpu version.\n");
		return;
	}

	gt_ppi = irq_of_parse_and_map(np, 0);
	if (!gt_ppi) {
		pr_warn("global-timer: unable to parse irq\n");
		return;
	}

	gt_base = of_iomap(np, 0);
	if (!gt_base) {
		pr_warn("global-timer: invalid base address\n");
		return;
	}

	gt_clk = of_clk_get(np, 0);
	if (!IS_ERR(gt_clk)) {
		err = clk_prepare_enable(gt_clk);
		if (err)
			goto out_unmap;
	} else {
		pr_warn("global-timer: clk not found\n");
		err = -EINVAL;
		goto out_unmap;
	}

	gt_clk_rate = clk_get_rate(gt_clk);

	gt_evt = alloc_percpu(struct clock_event_device);
	if (!gt_evt) {
		pr_warn("global-timer: can't allocate memory\n");
		err = -ENOMEM;
		goto out_clk;
	}

	clk_rate_change_nb.notifier_call = arm_global_timer_clockevent_cb;
	clk_rate_change_nb.next = NULL;
	if (clk_notifier_register(gt_clk, &clk_rate_change_nb))
		pr_warn("Unable to register clock notifier.\n");

	err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
				 "gt", gt_evt);
	if (err) {
		pr_warn("global-timer: can't register interrupt %d (%d)\n",
			gt_ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&gt_cpu_nb);
	if (err) {
		pr_warn("global-timer: unable to register cpu notifier.\n");
		goto out_irq;
	}

	/* Immediately configure the timer on the boot CPU */
	gt_clocksource_init();
	gt_clockevents_init(this_cpu_ptr(gt_evt));

	return;

out_irq:
	free_percpu_irq(gt_ppi, gt_evt);
out_free:
	free_percpu(gt_evt);
out_clk:
	clk_disable_unprepare(gt_clk);
out_unmap:
	iounmap(gt_base);
	WARN(err, "ARM Global timer register failed (%d)\n", err);
}