static int __init tegra_gic_of_init(struct device_node *node,
				    struct device_node *parent)
{
	int i;
	struct device_node *arm_gic_np =
		of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
	struct device_node *tegra_gic_np =
		of_find_compatible_node(NULL, NULL, "nvidia,tegra-gic");

	tegra_wakeup_table_init();

	gic_dist_base = of_iomap(arm_gic_np, 0);
	gic_cpu_base = of_iomap(arm_gic_np, 1);
	gic_version = (readl(gic_dist_base + 0xFE8) & 0xF0) >> 4;

	/* Retrieve the number of ictlrs from DT, falling back to the GIC distributor */
	if (of_property_read_u32(tegra_gic_np, "num-ictrls", &num_ictlrs))
		num_ictlrs = readl_relaxed(gic_dist_base + GIC_DIST_CTR) & 0x1f;
	pr_info("the number of interrupt controllers found is %d\n", num_ictlrs);

	ictlr_reg_base = kcalloc(num_ictlrs, sizeof(void *), GFP_KERNEL);
	if (!ictlr_reg_base)
		return -ENOMEM;

	tegra_clocks_init();

	for (i = 0; i < num_ictlrs; i++) {
		ictlr_reg_base[i] = of_iomap(node, i);
		if (!ictlr_reg_base[i]) {
			pr_err("failed to map ictlr %d registers\n", i);
			return -EINVAL;
		}
		writel(~0, ictlr_reg_base[i] + ICTLR_CPU_IER_CLR);
		writel(0, ictlr_reg_base[i] + ICTLR_CPU_IEP_CLASS);
		writel(~0, ictlr_reg_base[i] + ICTLR_CPU_IEP_FIR_CLR);
	}

	gic_arch_extn.irq_ack = tegra_ack;
	gic_arch_extn.irq_eoi = tegra_eoi;
	gic_arch_extn.irq_mask = tegra_mask;
	gic_arch_extn.irq_unmask = tegra_unmask;
	gic_arch_extn.irq_retrigger = tegra_retrigger;
	gic_arch_extn.irq_set_type = tegra_set_type;
	gic_arch_extn.irq_set_wake = tegra_set_wake;
	gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND;

#ifdef CONFIG_PM_SLEEP
	tegra_legacy_irq_syscore_init();
	if (gic_version == GIC_V2)
		cpu_pm_register_notifier(&tegra_gic_notifier_block);
#endif

#if !defined(CONFIG_TRUSTED_FOUNDATIONS) && \
	defined(CONFIG_ARCH_TEGRA_12x_SOC) && defined(CONFIG_FIQ_DEBUGGER)
	tegra_gic_dist_init();
#endif

	return 0;
}
static void tegra114_gic_cpu_pm_registration(void)
{
	struct device_node *dn;

	dn = of_find_matching_node(NULL, tegra114_dt_gic_match);
	if (!dn)
		return;

	tegra_gic_cpu_base = of_iomap(dn, 1);

	cpu_pm_register_notifier(&tegra_gic_notifier_block);
}
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	cpu_pm_register_notifier(&gic_notifier_block);
}
static int __init vf610_mscm_ir_of_init(struct device_node *node,
					struct device_node *parent)
{
	struct irq_domain *domain, *domain_parent;
	struct regmap *mscm_cp_regmap;
	int ret, cpuid;

	domain_parent = irq_find_host(parent);
	if (!domain_parent) {
		pr_err("vf610_mscm_ir: interrupt-parent not found\n");
		return -EINVAL;
	}

	mscm_ir_data = kzalloc(sizeof(*mscm_ir_data), GFP_KERNEL);
	if (!mscm_ir_data)
		return -ENOMEM;

	/* of_io_request_and_map() returns an ERR_PTR on failure, not NULL */
	mscm_ir_data->mscm_ir_base = of_io_request_and_map(node, 0, "mscm-ir");
	if (IS_ERR(mscm_ir_data->mscm_ir_base)) {
		pr_err("vf610_mscm_ir: unable to map mscm register\n");
		ret = PTR_ERR(mscm_ir_data->mscm_ir_base);
		goto out_free;
	}

	mscm_cp_regmap = syscon_regmap_lookup_by_phandle(node, "fsl,cpucfg");
	if (IS_ERR(mscm_cp_regmap)) {
		ret = PTR_ERR(mscm_cp_regmap);
		pr_err("vf610_mscm_ir: regmap lookup for cpucfg failed\n");
		goto out_unmap;
	}

	regmap_read(mscm_cp_regmap, MSCM_CPxNUM, &cpuid);
	mscm_ir_data->cpu_mask = 0x1 << cpuid;

	domain = irq_domain_add_hierarchy(domain_parent, 0, MSCM_IRSPRC_NUM,
					  node, &mscm_irq_domain_ops,
					  mscm_ir_data);
	if (!domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	cpu_pm_register_notifier(&mscm_ir_notifier_block);

	return 0;

out_unmap:
	iounmap(mscm_ir_data->mscm_ir_base);
out_free:
	kfree(mscm_ir_data);
	return ret;
}
static int __init secure_pm_init(void)
{
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		cpu_pm_register_notifier(&secure_notifier_block);

	if (cpu_is_omap44xx())
		ppa_service_0_index = OMAP4_PPA_SERVICE_0;
	else if (cpu_is_omap54xx())
		ppa_service_0_index = OMAP5_PPA_SERVICE_0;

	return 0;
}
static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq, irq_base;
	unsigned int nr_irq, soc_max_nr;

	soc_max_nr = (soc_is_exynos5250() || soc_is_exynos542x()) ?
		EXYNOS5_MAX_COMBINER_NR : EXYNOS4_MAX_COMBINER_NR;

	if (np) {
		if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
			pr_warning("%s: number of combiners not specified, setting default as %d.\n",
				   __func__, soc_max_nr);
			max_nr = soc_max_nr;
		}
	} else {
		max_nr = soc_max_nr;
	}

	nr_irq = max_nr * MAX_IRQ_IN_COMBINER;

	irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
	if (IS_ERR_VALUE(irq_base)) {
		irq_base = COMBINER_IRQ(0, 0);
		pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n",
			   __func__, irq_base);
	}

	combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
						    &combiner_irq_domain_ops,
						    &combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warning("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
		irq = IRQ_SPI(i);
#ifdef CONFIG_OF
		if (np)
			irq = irq_of_parse_and_map(np, i);
#endif
		combiner_cascade_irq(i, irq);
	}

#ifdef CONFIG_PM
	/* Save/restore combiner state across suspend/resume */
	cpu_pm_register_notifier(&combiner_notifier_block);
#endif
}
static int arm_coresight_probe(struct platform_device *pdev)
{
#ifdef CONFIG_CORESIGHT_TRACE_SUPPORT
	static struct clk *traceclk;
#endif

	dbgclk = clk_get(&pdev->dev, "DBGCLK");
	if (IS_ERR(dbgclk)) {
		pr_warn("No DBGCLK is defined...\n");
		dbgclk = NULL;
	}
	if (dbgclk)
		clk_prepare(dbgclk);

	arch_coresight_init();

#ifdef CONFIG_CORESIGHT_TRACE_SUPPORT
	/* enable ETM trace by default */
	if (etm_need_enabled()) {
		traceclk = clk_get(&pdev->dev, "TRACECLK");
		if (IS_ERR(traceclk)) {
			pr_warn("No TRACECLK is defined...\n");
			traceclk = NULL;
		}
		if (traceclk)
			clk_prepare_enable(traceclk);
		arch_enable_trace(etm_enable_mask);
	}
#endif

	cpu_notifier(coresight_core_notifier, 0);
	cpu_pm_register_notifier(&coresight_notifier_block);
	return 0;
}
static void vfp_pm_init(void)
{
	cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}
static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}
static int __init register_cpususpend_notifier(void)
{
	return cpu_pm_register_notifier(&sunxi_cpususpend_notifier_block);
}
static void __init irq_pm_init(void)
{
	cpu_pm_register_notifier(&irq_notifier_block);
}
static void __init irq_pm_init(void)
{
	/* FIXME: Remove this when MPU OSWR support is added */
	if (!soc_is_omap54xx())
		cpu_pm_register_notifier(&irq_notifier_block);
}
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		on_each_cpu(vfp_enable, NULL, 1);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already setup to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		hotcpu_notifier(vfp_hotplug, 0);

		/* Extract the architecture version */
		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		/* vfp_pm_init() registers vfp_cpu_pm_notifier_block; a direct
		 * cpu_pm_register_notifier() call here would register it twice. */
		vfp_pm_init();

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16. CPUs in this configuration
			 * only have 16 x 64bit registers.
			 */
			if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
				elf_hwcap |= HWCAP_VFPv3D16;
		}
#endif
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations. Only check
		 * for NEON if the hardware has the MVFR registers.
		 */
		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
#ifdef CONFIG_NEON
			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
				elf_hwcap |= HWCAP_NEON;
#endif
			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
				elf_hwcap |= HWCAP_VFPv4;
		}
	}
	return 0;
}
static int __init fiq_glue_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&fiq_glue_cpu_pm_notifier);
}
static int __init coresight_pm_init(void)
{
	cpu_pm_register_notifier(&coresight_notifier_block);
	return 0;
}
static void fpsimd_pm_init(void)
{
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}
static void __init hw_breakpoint_pm_init(void)
{
	cpu_pm_register_notifier(&hw_breakpoint_cpu_pm_nb);
}
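/*
 * Every call site above registers against the kernel's CPU PM notifier chain
 * (kernel/cpu_pm.c). For reference, a minimal sketch of the receiving side
 * follows. The names my_cpu_pm_notify and my_cpu_pm_notifier_block are
 * hypothetical illustrations, not taken from any of the drivers above; only
 * the cpu_pm API calls and event constants come from <linux/cpu_pm.h>.
 */
#include <linux/cpu_pm.h>
#include <linux/notifier.h>

static int my_cpu_pm_notify(struct notifier_block *self, unsigned long action,
			    void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* CPU is about to enter a low-power state: save context here */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* CPU woke up (or the low-power entry aborted): restore context */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_pm_notifier_block = {
	.notifier_call = my_cpu_pm_notify,
};

static int __init my_cpu_pm_init(void)
{
	/* mirrors the registration pattern used throughout this section */
	return cpu_pm_register_notifier(&my_cpu_pm_notifier_block);
}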