/*
 * mtrr_init_finialize - late initcall finishing MTRR bring-up.
 *
 * (Function name carries an upstream typo of "finalize"; kept as-is
 * since the initcall registration elsewhere references this symbol.)
 *
 * Returns 0 in all cases so the initcall machinery records success.
 */
static int __init mtrr_init_finialize(void)
{
	/* No MTRR interface was detected at early init: nothing to do. */
	if (!mtrr_if)
		return 0;

	if (use_intel()) {
		/*
		 * Warn about inconsistent per-CPU MTRR state unless the
		 * MTRR cleanup pass already rewrote the layout itself.
		 */
		if (!changed_by_mtrr_cleanup)
			mtrr_state_warn();
		return 0;
	}

	/*
	 * The CPU has no MTRR and seems to not support SMP. They have
	 * specific drivers, we use a tricky method to support
	 * suspend/resume for them.
	 *
	 * TBD: is there any system with such CPU which supports
	 * suspend/resume? If no, we should remove the code.
	 */
	register_syscore_ops(&mtrr_syscore_ops);

	return 0;
}
/*
 * s3c64xx_clk_sleep_init - allocate clock-register save areas and hook
 * up the syscore suspend/resume ops for the S3C64xx clock controller.
 *
 * Degrades gracefully on allocation failure: any partial allocation is
 * freed and only a warning is printed (the system runs without sleep
 * support rather than failing boot).
 */
static void s3c64xx_clk_sleep_init(void)
{
	/* Save area for registers common to all S3C64xx SoCs. */
	s3c64xx_save_common = samsung_clk_alloc_reg_dump(s3c64xx_clk_regs,
						ARRAY_SIZE(s3c64xx_clk_regs));
	if (!s3c64xx_save_common)
		goto err_warn;

	/* Extra registers that exist only on non-S3C6400 parts (S3C6410). */
	if (!is_s3c6400) {
		s3c64xx_save_soc = samsung_clk_alloc_reg_dump(s3c6410_clk_regs,
						ARRAY_SIZE(s3c6410_clk_regs));
		if (!s3c64xx_save_soc)
			goto err_soc;
	}

	register_syscore_ops(&s3c64xx_clk_syscore_ops);
	return;

err_soc:
	kfree(s3c64xx_save_common);
err_warn:
	pr_warn("%s: failed to allocate sleep save data, no sleep support!\n",
		__func__);
}
/*
 * alchemy_usb_init - configure the USB block for the detected Alchemy
 * CPU variant and register syscore (suspend/resume) callbacks.
 *
 * Returns 0 unconditionally; an unrecognized CPU type simply skips the
 * per-SoC USB configuration (the syscore ops are still registered, as
 * in the original flow).
 */
static int __init alchemy_usb_init(void)
{
	switch (alchemy_get_cputype()) {
	case ALCHEMY_CPU_AU1000:
	case ALCHEMY_CPU_AU1500:
	case ALCHEMY_CPU_AU1100:
		au1000_usb_init(AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG);
		break;
	case ALCHEMY_CPU_AU1550:
		au1000_usb_init(AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG);
		break;
	case ALCHEMY_CPU_AU1200:
		au1200_usb_init();
		break;
	case ALCHEMY_CPU_AU1300:
		au1300_usb_init();
		break;
	default:
		/* Unknown/unsupported CPU: no USB block to configure. */
		break;
	}

	register_syscore_ops(&alchemy_usb_pm_ops);

	return 0;
}
/*
 * combiner_of_init - device-tree init for the Samsung IRQ combiner.
 * @np:     combiner device node
 * @parent: parent interrupt controller node (unused here)
 *
 * Maps the combiner registers, reads the optional
 * "samsung,combiner-nr" property (falling back to the pre-set default
 * in @max_nr when absent), initializes the combiner and registers the
 * syscore ops for suspend/resume.
 *
 * Returns 0 on success, -ENXIO if the registers cannot be mapped.
 */
static int __init combiner_of_init(struct device_node *np,
				   struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	/* Property is optional: keep the default combiner count if absent. */
	if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
		pr_info("%s: number of combiners not specified, "
			"setting default as %d.\n",
			__func__, max_nr);
	}

	combiner_init(combiner_base, np);

	register_syscore_ops(&combiner_syscore_ops);

	return 0;
}
/*
 * syscnt_assist_init_ops - register system-counter assist syscore
 * (suspend/resume) callbacks. Always returns 0.
 */
static int __init syscnt_assist_init_ops(void)
{
	register_syscore_ops(&syscnt_assist_syscore_ops);

	return 0;
}
/*
 * sched_clock_syscore_init - hook sched_clock suspend/resume handling
 * into the syscore framework. Always returns 0.
 */
static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);

	return 0;
}
/*
 * irqmux_syscore_init - register IRQ-mux syscore (suspend/resume)
 * callbacks. Always returns 0.
 */
static int __init irqmux_syscore_init(void)
{
	register_syscore_ops(&irqmux_syscore);

	return 0;
}
/*
 * exynos_audss_clk_init - register exynos_audss clocks from DT.
 * @np: AUDSS clock controller device node
 *
 * Maps the AUDSS registers, allocates the clk lookup table, registers
 * the mux/divider/gate clocks and exposes them through a onecell clock
 * provider. With PM_SLEEP enabled, syscore ops are registered so the
 * AUDSS registers survive suspend/resume.
 *
 * Fix vs. original: on lookup-table allocation failure the register
 * mapping obtained via of_iomap() is now released instead of leaked,
 * and the array allocation uses overflow-safe kcalloc().
 */
static void __init exynos_audss_clk_init(struct device_node *np)
{
	reg_base = of_iomap(np, 0);
	if (!reg_base) {
		pr_err("%s: failed to map audss registers\n", __func__);
		return;
	}

	/* kcalloc zero-fills and guards the count * size multiplication. */
	clk_table = kcalloc(EXYNOS_AUDSS_MAX_CLKS, sizeof(struct clk *),
			    GFP_KERNEL);
	if (!clk_table) {
		pr_err("%s: could not allocate clk lookup table\n", __func__);
		/* Don't leak the register mapping when aborting init. */
		iounmap(reg_base);
		reg_base = NULL;
		return;
	}

	clk_data.clks = clk_table;
	clk_data.clk_num = EXYNOS_AUDSS_MAX_CLKS;
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);

	/* Parent muxes for the SRP and I2S clock trees. */
	clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
				mout_audss_p, ARRAY_SIZE(mout_audss_p),
				CLK_SET_RATE_NO_REPARENT,
				reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);

	clk_table[EXYNOS_MOUT_I2S] = clk_register_mux(NULL, "mout_i2s",
				mout_i2s_p, ARRAY_SIZE(mout_i2s_p),
				CLK_SET_RATE_NO_REPARENT,
				reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);

	/* Dividers feeding the SRP, audio bus and I2S outputs. */
	clk_table[EXYNOS_DOUT_SRP] = clk_register_divider(NULL, "dout_srp",
				"mout_audss", 0, reg_base + ASS_CLK_DIV, 0, 4,
				0, &lock);

	clk_table[EXYNOS_DOUT_AUD_BUS] = clk_register_divider(NULL,
				"dout_aud_bus", "dout_srp", 0,
				reg_base + ASS_CLK_DIV, 4, 4, 0, &lock);

	clk_table[EXYNOS_DOUT_I2S] = clk_register_divider(NULL, "dout_i2s",
				"mout_i2s", 0, reg_base + ASS_CLK_DIV, 8, 4, 0,
				&lock);

	/* Leaf gates; rates follow their parents (CLK_SET_RATE_PARENT). */
	clk_table[EXYNOS_SRP_CLK] = clk_register_gate(NULL, "srp_clk",
				"dout_srp", CLK_SET_RATE_PARENT,
				reg_base + ASS_CLK_GATE, 0, 0, &lock);

	clk_table[EXYNOS_I2S_BUS] = clk_register_gate(NULL, "i2s_bus",
				"dout_aud_bus", CLK_SET_RATE_PARENT,
				reg_base + ASS_CLK_GATE, 2, 0, &lock);

	clk_table[EXYNOS_SCLK_I2S] = clk_register_gate(NULL, "sclk_i2s",
				"dout_i2s", CLK_SET_RATE_PARENT,
				reg_base + ASS_CLK_GATE, 3, 0, &lock);

	clk_table[EXYNOS_PCM_BUS] = clk_register_gate(NULL, "pcm_bus",
				"sclk_pcm", CLK_SET_RATE_PARENT,
				reg_base + ASS_CLK_GATE, 4, 0, &lock);

	clk_table[EXYNOS_SCLK_PCM] = clk_register_gate(NULL, "sclk_pcm",
				"div_pcm0", CLK_SET_RATE_PARENT,
				reg_base + ASS_CLK_GATE, 5, 0, &lock);

#ifdef CONFIG_PM_SLEEP
	register_syscore_ops(&exynos_audss_clk_syscore_ops);
#endif
	pr_info("Exynos: Audss: clock setup completed\n");
}
/*
 * hisik3v2_wdt_init - register watchdog syscore ops, then the platform
 * driver. Returns the platform_driver_register() result.
 *
 * NOTE(review): syscore ops stay registered even if driver registration
 * fails — presumably harmless since the ops are self-contained; verify.
 */
static int __init hisik3v2_wdt_init(void)
{
	register_syscore_ops(&wdt_syscore_ops);

	return platform_driver_register(&hisik3v2_wdt_driver);
}
/*
 * tegra_init_timer - early init of the Tegra timer block from DT.
 * @np: timer device node (reference is consumed via of_node_put())
 *
 * Maps the timer registers, resolves the timer IRQ and input clock,
 * programs the microsecond prescaler for the detected clock rate, and
 * registers the clocksource, timer IRQ, clockevent device and syscore
 * ops. Failures in the mandatory steps are fatal (BUG()) since the
 * system cannot keep time without this timer.
 */
void __init tegra_init_timer(struct device_node *np)
{
	struct clk *clk;
	int ret;
	unsigned long rate;
	struct resource res;

	if (of_address_to_resource(np, 0, &res)) {
		pr_err("%s:No memory resources found\n", __func__);
		return;
	}

	timer_reg_base = ioremap(res.start, resource_size(&res));
	if (!timer_reg_base) {
		pr_err("%s:Can't map timer registers\n", __func__);
		BUG();
	}
	/* Keep the physical base too (used elsewhere, e.g. for LP suspend). */
	timer_reg_base_pa = res.start;

	tegra_timer_irq.irq = irq_of_parse_and_map(np, 0);
	if (tegra_timer_irq.irq <= 0) {
		pr_err("%s:Failed to map timer IRQ\n", __func__);
		BUG();
	}

	/* Prefer the DT clock; fall back to the legacy "timer" sys clock. */
	clk = of_clk_get(np, 0);
	if (IS_ERR(clk))
		clk = clk_get_sys("timer", NULL);
	if (IS_ERR(clk)) {
		pr_warn("Unable to get timer clock. Assuming 12Mhz input clock.\n");
		rate = 12000000;
	} else {
		clk_prepare_enable(clk);
		rate = clk_get_rate(clk);
	}

	/*
	 * Program the usec counter prescaler: dividend/divisor pairs that
	 * scale the input rate down to 1 MHz.
	 */
	switch (rate) {
	case 12000000:
		timer_writel(0x000b, TIMERUS_USEC_CFG);
		break;
	case 12800000:
		timer_writel(0x043F, TIMERUS_USEC_CFG);
		break;
	case 13000000:
		timer_writel(0x000c, TIMERUS_USEC_CFG);
		break;
	case 19200000:
		timer_writel(0x045f, TIMERUS_USEC_CFG);
		break;
	case 26000000:
		timer_writel(0x0019, TIMERUS_USEC_CFG);
		break;
#ifndef CONFIG_ARCH_TEGRA_2x_SOC
	case 16800000:
		timer_writel(0x0453, TIMERUS_USEC_CFG);
		break;
	case 38400000:
		timer_writel(0x04BF, TIMERUS_USEC_CFG);
		break;
	case 48000000:
		timer_writel(0x002F, TIMERUS_USEC_CFG);
		break;
#endif
	default:
		/* Simulation (QT) platforms get the 13 MHz setting. */
		if (tegra_platform_is_qt()) {
			timer_writel(0x000c, TIMERUS_USEC_CFG);
			break;
		}
		WARN(1, "Unknown clock rate");
	}

#ifdef CONFIG_PM_SLEEP
	hotplug_cpu_register(np);
#endif
	of_node_put(np);

#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	tegra20_init_timer();
#else
	tegra30_init_timer();
#endif

	ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
		"timer_us", 1000000, 300, 32, clocksource_mmio_readl_up);
	if (ret) {
		pr_err("%s: Failed to register clocksource: %d\n",
			__func__, ret);
		BUG();
	}

	ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
	if (ret) {
		pr_err("%s: Failed to register timer IRQ: %d\n",
			__func__, ret);
		BUG();
	}

	/* Configure and register the broadcast clockevent device. */
	clockevents_calc_mult_shift(&tegra_clockevent, 1000000, 5);
	tegra_clockevent.max_delta_ns =
		clockevent_delta2ns(0x1fffffff, &tegra_clockevent);
	tegra_clockevent.min_delta_ns =
		clockevent_delta2ns(0x1, &tegra_clockevent);
	tegra_clockevent.cpumask = cpu_all_mask;
	tegra_clockevent.irq = tegra_timer_irq.irq;
	clockevents_register_device(&tegra_clockevent);

#ifndef CONFIG_ARM64
#ifdef CONFIG_ARM_ARCH_TIMER
	/* Architectural timers take precedence over broadcast timers.
	   Only register a broadcast clockevent device if architectural
	   timers do not exist or cannot be initialized. */
	if (tegra_init_arch_timer())
#endif
		/* Architectural timers do not exist or cannot be initialized.
		   Fall back to using the broadcast timer as the sched clock. */
		setup_sched_clock(tegra_read_sched_clock, 32, 1000000);
#endif

	register_syscore_ops(&tegra_timer_syscore_ops);

#ifndef CONFIG_ARM64
	late_time_init = tegra_init_late_timer;
#endif

	//arm_delay_ops.delay = __tegra_delay;
	//arm_delay_ops.const_udelay = __tegra_const_udelay;
	//arm_delay_ops.udelay = __tegra_udelay;
}
/*
 * First routine called when the kernel module is loaded.
 *
 * Initializes the trusted-foundations char device, sysfs object,
 * syscore ops and the Secure World communication channel. Error
 * handling is a reverse-order goto ladder: each label undoes the
 * steps completed before its corresponding failure point, so the
 * statement/label order below is significant.
 *
 * Returns 0 on success or a negative error code.
 */
static int __init tf_device_register(void)
{
	int error;
	struct tf_device *dev = &g_tf_dev;

	dprintk(KERN_INFO "tf_device_register()\n");

	/*
	 * Initialize the device
	 */
	dev->dev_number = MKDEV(device_major_number,
				TF_DEVICE_MINOR_NUMBER);
	cdev_init(&dev->cdev, &g_tf_device_file_ops);
	dev->cdev.owner = THIS_MODULE;

	INIT_LIST_HEAD(&dev->connection_list);
	spin_lock_init(&dev->connection_list_lock);

#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
	/* Called through a function pointer — early comm bring-up. */
	error = (*tf_comm_early_init)();
	if (error)
		goto module_early_init_failed;

	error = tf_device_mshield_init(smc_mem);
	if (error)
		goto mshield_init_failed;

#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
	error = tf_crypto_hmac_module_init();
	if (error)
		goto hmac_init_failed;

	error = tf_self_test_register_device();
	if (error)
		goto self_test_register_device_failed;
#endif
#endif

	/* register the sysfs object driver stats */
	error = kobject_init_and_add(&dev->kobj, &tf_ktype, NULL, "%s",
		TF_DEVICE_BASE_NAME);
	if (error) {
		printk(KERN_ERR "tf_device_register(): "
			"kobject_init_and_add failed (error %d)!\n", error);
		/* kobject_init_and_add requires a put even on failure. */
		kobject_put(&dev->kobj);
		goto kobject_init_and_add_failed;
	}

	register_syscore_ops((struct syscore_ops *)&g_tf_syscore_ops);

	/*
	 * Register the char device.
	 */
	printk(KERN_INFO "Registering char device %s (%u:%u)\n",
		TF_DEVICE_BASE_NAME,
		MAJOR(dev->dev_number),
		MINOR(dev->dev_number));
	error = register_chrdev_region(dev->dev_number, 1,
		TF_DEVICE_BASE_NAME);
	if (error != 0) {
		printk(KERN_ERR "tf_device_register():"
			" register_chrdev_region failed (error %d)!\n",
			error);
		goto register_chrdev_region_failed;
	}

	error = cdev_add(&dev->cdev, dev->dev_number, 1);
	if (error != 0) {
		printk(KERN_ERR "tf_device_register(): "
			"cdev_add failed (error %d)!\n",
			error);
		goto cdev_add_failed;
	}

	/*
	 * Initialize the communication with the Secure World.
	 */
#ifdef CONFIG_TF_TRUSTZONE
	dev->sm.soft_int_irq = soft_interrupt;
#endif
	error = tf_init(&g_tf_dev.sm);
	if (error != S_SUCCESS) {
		dprintk(KERN_ERR "tf_device_register(): "
			"tf_init failed (error %d)!\n",
			error);
		goto init_failed;
	}

#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
	error = tf_self_test_post_init(&(g_tf_dev.kobj));
	/* N.B. error > 0 indicates a POST failure, which will not
	   prevent the module from loading. */
	if (error < 0) {
		dprintk(KERN_ERR "tf_device_register(): "
			"tf_self_test_post_vectors failed (error %d)!\n",
			error);
		goto post_failed;
	}
#endif

#ifdef CONFIG_ANDROID
	/* NOTE(review): class_create/device_create results unchecked. */
	tf_class = class_create(THIS_MODULE, TF_DEVICE_BASE_NAME);
	device_create(tf_class, NULL,
		dev->dev_number,
		NULL, TF_DEVICE_BASE_NAME);
#endif

#ifdef CONFIG_TF_ZEBRA
	/*
	 * Initializes the /dev/tf_ctrl device node.
	 */
	error = tf_ctrl_device_register();
	if (error)
		goto ctrl_failed;
#endif

#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
	address_cache_property((unsigned long) &tf_device_register);
#endif
	/*
	 * Successful completion.
	 */

	dprintk(KERN_INFO "tf_device_register(): Success\n");
	return 0;

	/*
	 * Error: undo all operations in the reverse order
	 */
#ifdef CONFIG_TF_ZEBRA
ctrl_failed:
#endif
#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
	tf_self_test_post_exit();
post_failed:
#endif
init_failed:
	cdev_del(&dev->cdev);
cdev_add_failed:
	unregister_chrdev_region(dev->dev_number, 1);
register_chrdev_region_failed:
	unregister_syscore_ops((struct syscore_ops *)&g_tf_syscore_ops);
kobject_init_and_add_failed:
	kobject_del(&g_tf_dev.kobj);

#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
	tf_self_test_unregister_device();
self_test_register_device_failed:
	tf_crypto_hmac_module_exit();
hmac_init_failed:
#endif
	tf_device_mshield_exit();
mshield_init_failed:
module_early_init_failed:
#endif
	dprintk(KERN_INFO "tf_device_register(): Failure (error %d)\n",
		error);
	return error;
}
/*
 * acpi_processor_syscore_init - register ACPI processor syscore
 * (suspend/resume) callbacks.
 */
void acpi_processor_syscore_init(void)
{
	register_syscore_ops(&acpi_processor_syscore_ops);
}
/*
 * sirfsoc_irq_pm_init - register SiRFSoC interrupt-controller syscore
 * (suspend/resume) callbacks. Always returns 0.
 */
static int __init sirfsoc_irq_pm_init(void)
{
	register_syscore_ops(&sirfsoc_irq_syscore_ops);

	return 0;
}
/*
 * exynos4212_register_clocks - apply Exynos4212-specific overrides to
 * the shared Exynos4 clock tables, then register the 4212 clock
 * sources, clocks and syscore (suspend/resume) ops.
 *
 * The assignments below retarget mux/divider register fields and ops
 * that differ from the base Exynos4 layout; they must run before the
 * registration calls at the bottom.
 */
void __init exynos4212_register_clocks(void)
{
	int ptr;

	/* usbphy1 is removed in exynos 4212 */
	exynos4_clkset_group_list[4] = NULL;

	/* mout_mpll_user is used instead of mout_mpll in exynos 4212 */
	exynos4_clkset_group_list[6] = &exynos4212_clk_mout_mpll_user.clk;
	exynos4_clkset_aclk_top_list[0] = &exynos4212_clk_mout_mpll_user.clk;
	exynos4_clkset_mout_mfc0_list[0] = &exynos4212_clk_mout_mpll_user.clk;

	/* mout_mpll source select moved into the DMC clock domain. */
	exynos4_clk_mout_mpll.reg_src.reg = EXYNOS4_CLKSRC_DMC;
	exynos4_clk_mout_mpll.reg_src.shift = 12;
	exynos4_clk_mout_mpll.reg_src.size = 1;

	exynos4_clk_aclk_200.sources = &exynos4212_clkset_aclk_200;
	exynos4_clk_aclk_200.reg_src.reg = EXYNOS4_CLKSRC_TOP1;
	exynos4_clk_aclk_200.reg_src.shift = 20;
	exynos4_clk_aclk_200.reg_src.size = 1;

	/* fimg2d (G2D) moved to the DMC IP gate on 4212. */
	exynos4_clk_fimg2d.enable = exynos4_clk_ip_dmc_ctrl;
	exynos4_clk_fimg2d.ctrlbit = (1 << 23);

	exynos4_clk_mout_g2d0.reg_src.reg = EXYNOS4_CLKSRC_DMC;
	exynos4_clk_mout_g2d0.reg_src.shift = 20;
	exynos4_clk_mout_g2d0.reg_src.size = 1;

	exynos4_clk_mout_g2d1.reg_src.reg = EXYNOS4_CLKSRC_DMC;
	exynos4_clk_mout_g2d1.reg_src.shift = 24;
	exynos4_clk_mout_g2d1.reg_src.size = 1;

	exynos4_clk_sclk_fimg2d.reg_src.reg = EXYNOS4_CLKSRC_DMC;
	exynos4_clk_sclk_fimg2d.reg_src.shift = 28;
	exynos4_clk_sclk_fimg2d.reg_src.size = 1;
	exynos4_clk_sclk_fimg2d.reg_div.reg = EXYNOS4_CLKDIV_DMC1;
	exynos4_clk_sclk_fimg2d.reg_div.shift = 0;
	exynos4_clk_sclk_fimg2d.reg_div.size = 4;

	/* 4212-specific EPLL/VPLL rate handlers. */
	exynos4_epll_ops.get_rate = exynos4212_epll_get_rate;
	exynos4_epll_ops.set_rate = exynos4212_epll_set_rate;
	exynos4_vpll_ops.get_rate = exynos4212_vpll_get_rate;
	exynos4_vpll_ops.set_rate = exynos4212_vpll_set_rate;

	for (ptr = 0; ptr < ARRAY_SIZE(exynos4212_sysclks); ptr++)
		s3c_register_clksrc(exynos4212_sysclks[ptr], 1);

	s3c_register_clksrc(exynos4212_clksrcs,
			    ARRAY_SIZE(exynos4212_clksrcs));

	s3c_register_clocks(exynos4212_init_clocks,
			    ARRAY_SIZE(exynos4212_init_clocks));

	/* Register, then immediately gate, the default-off clocks. */
	s3c_register_clocks(exynos4212_init_clocks_off,
			    ARRAY_SIZE(exynos4212_init_clocks_off));
	s3c_disable_clocks(exynos4212_init_clocks_off,
			   ARRAY_SIZE(exynos4212_init_clocks_off));

	s3c_register_clksrc(&exynos4212_clk_isp_srcs_div0, 1);
	s3c_register_clksrc(exynos4212_clk_isp_srcs,
			    ARRAY_SIZE(exynos4212_clk_isp_srcs));
	s3c_register_clocks(exynos4212_clk_isp,
			    ARRAY_SIZE(exynos4212_clk_isp));
	s3c_disable_clocks(&exynos4212_clk_isp_srcs[3].clk, 1);
	s3c_disable_clocks(&exynos4212_clk_isp_srcs[4].clk, 1);
	s3c_disable_clocks(&exynos4212_clk_isp_srcs[5].clk, 1);
	s3c_disable_clocks(&exynos4212_clk_isp_srcs[6].clk, 1);

	register_syscore_ops(&exynos4212_clock_syscore_ops);
}
/*
 * fiq_glue_syscore_init - register FIQ-glue syscore (suspend/resume)
 * callbacks. Always returns 0.
 */
static int __init fiq_glue_syscore_init(void)
{
	register_syscore_ops(&fiq_glue_syscore_ops);

	return 0;
}
/*
 * rk3288_clk_sleep_init - stash the CRU register base for the syscore
 * suspend/resume handlers, then register those handlers.
 * @reg_base: mapped base of the RK3288 clock & reset unit (CRU)
 */
static void rk3288_clk_sleep_init(void __iomem *reg_base)
{
	rk3288_cru_base = reg_base;
	register_syscore_ops(&rk3288_clk_syscore_ops);
}
/*
 * timer_init_syscore_ops - register timer syscore (suspend/resume)
 * callbacks. Always returns 0.
 */
static int __init timer_init_syscore_ops(void)
{
	register_syscore_ops(&timer_syscore_ops);

	return 0;
}
/*
 * armada_370_xp_timer_common_init - shared DT init for the Armada
 * 370/XP timer block (legacy void-returning variant).
 * @np: timer device node
 *
 * Maps the global and per-CPU timer registers, selects the 25 MHz or
 * divided clock mode, sets up timer 0 as a free-running clocksource
 * and sched_clock, and registers the per-CPU clockevent IRQ, CPU
 * notifier and syscore ops. Mapping failures only WARN; later
 * accesses would then fault.
 */
static void __init armada_370_xp_timer_common_init(struct device_node *np)
{
	u32 clr = 0, set = 0;
	int res;

	timer_base = of_iomap(np, 0);
	WARN_ON(!timer_base);
	local_base = of_iomap(np, 1);

	/* Pick clock mode: fixed 25 MHz input vs. divided core clock. */
	if (timer25Mhz) {
		set = TIMER0_25MHZ;
		enable_mask = TIMER0_EN;
	} else {
		clr = TIMER0_25MHZ;
		enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
	}
	atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);
	local_timer_ctrl_clrset(clr, set);

	/*
	 * We use timer 0 as clocksource, and private(local) timer 0
	 * for clockevents
	 */
	armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);

	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;

	/*
	 * Setup free-running clocksource timer (interrupts
	 * disabled).
	 */
	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);

	atomic_io_modify(timer_base + TIMER_CTRL_OFF,
		TIMER0_RELOAD_EN | enable_mask,
		TIMER0_RELOAD_EN | enable_mask);

	/*
	 * Set scale and timer for sched_clock.
	 */
	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);

	clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
			      "armada_370_xp_clocksource",
			      timer_clk, 300, 32, clocksource_mmio_readl_down);

	register_cpu_notifier(&armada_370_xp_timer_cpu_nb);

	armada_370_xp_evt = alloc_percpu(struct clock_event_device);

	/*
	 * Setup clockevent timer (interrupt-driven).
	 */
	res = request_percpu_irq(armada_370_xp_clkevt_irq,
				armada_370_xp_timer_interrupt,
				"armada_370_xp_per_cpu_tick",
				armada_370_xp_evt);
	/* Immediately configure the timer on the boot CPU */
	if (!res)
		armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));

	register_syscore_ops(&armada_370_xp_timer_syscore_ops);
}
/*
 * s3c64xx_syscore_init - register S3C64xx interrupt syscore
 * (suspend/resume) callbacks. Always returns 0.
 */
static __init int s3c64xx_syscore_init(void)
{
	register_syscore_ops(&s3c64xx_irq_syscore_ops);

	return 0;
}
/*
 * armada_370_xp_timer_common_init - shared DT init for the Armada
 * 370/XP timer block (error-returning variant using cpuhp).
 * @np: timer device node
 *
 * Maps the global and per-CPU timer registers, selects the clock mode,
 * sets up timer 0 as free-running clocksource / delay timer /
 * sched_clock, then wires up the per-CPU clockevent via the CPU
 * hotplug state machine and registers syscore ops.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): error paths after of_iomap() return without unmapping
 * timer_base/local_base — matches the original flow; confirm whether
 * cleanup is intentional to skip (boot-fatal path) before changing.
 */
static int __init armada_370_xp_timer_common_init(struct device_node *np)
{
	u32 clr = 0, set = 0;
	int res;

	timer_base = of_iomap(np, 0);
	if (!timer_base) {
		pr_err("Failed to iomap");
		return -ENXIO;
	}

	local_base = of_iomap(np, 1);
	if (!local_base) {
		pr_err("Failed to iomap");
		return -ENXIO;
	}

	/* Pick clock mode: fixed 25 MHz input vs. divided core clock. */
	if (timer25Mhz) {
		set = TIMER0_25MHZ;
		enable_mask = TIMER0_EN;
	} else {
		clr = TIMER0_25MHZ;
		enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
	}
	atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);
	local_timer_ctrl_clrset(clr, set);

	/*
	 * We use timer 0 as clocksource, and private(local) timer 0
	 * for clockevents
	 */
	armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);

	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;

	/*
	 * Setup free-running clocksource timer (interrupts
	 * disabled).
	 */
	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);

	atomic_io_modify(timer_base + TIMER_CTRL_OFF,
		TIMER0_RELOAD_EN | enable_mask,
		TIMER0_RELOAD_EN | enable_mask);

	/* Use timer 0 as the raw-read delay timer as well. */
	armada_370_delay_timer.freq = timer_clk;
	register_current_timer_delay(&armada_370_delay_timer);

	/*
	 * Set scale and timer for sched_clock.
	 */
	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);

	res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
				    "armada_370_xp_clocksource",
				    timer_clk, 300, 32,
				    clocksource_mmio_readl_down);
	if (res) {
		pr_err("Failed to initialize clocksource mmio");
		return res;
	}

	armada_370_xp_evt = alloc_percpu(struct clock_event_device);
	if (!armada_370_xp_evt)
		return -ENOMEM;

	/*
	 * Setup clockevent timer (interrupt-driven).
	 */
	res = request_percpu_irq(armada_370_xp_clkevt_irq,
				armada_370_xp_timer_interrupt,
				"armada_370_xp_per_cpu_tick",
				armada_370_xp_evt);
	/* Immediately configure the timer on the boot CPU */
	if (res) {
		pr_err("Failed to request percpu irq");
		return res;
	}

	/* Per-CPU setup/teardown handled by the hotplug state machine. */
	res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
				"clockevents/armada:starting",
				armada_370_xp_timer_starting_cpu,
				armada_370_xp_timer_dying_cpu);
	if (res) {
		pr_err("Failed to setup hotplug state and timer");
		return res;
	}

	register_syscore_ops(&armada_370_xp_timer_syscore_ops);

	return 0;
}
/*
 * exynos4212_register_clocks - apply Exynos4212-specific overrides to
 * the shared Exynos4 clock tables, register the 4212 clocks, disable
 * CLKOUT pads to save power, and register syscore ops.
 *
 * This variant additionally walks exynos4x12_cmu_config to program the
 * CLKOUT field (bits [20:16], enable bit 16) of each CMU register.
 */
void __init exynos4212_register_clocks(void)
{
	int ptr;
	unsigned int tmp;

	/* usbphy1 is removed in exynos 4212 */
	exynos4_clkset_group_list[4] = NULL;

	/* mout_mpll_user is used instead of mout_mpll in exynos 4212 */
	exynos4_clkset_group_list[6] = &exynos4212_clk_mout_mpll_user.clk;
	exynos4_clkset_aclk_top_list[0] = &exynos4212_clk_mout_mpll_user.clk;
	exynos4_clkset_mout_mfc0_list[0] = &exynos4212_clk_mout_mpll_user.clk;

	/* mout_mpll source select moved into the DMC clock domain. */
	exynos4_clk_mout_mpll.reg_src.reg = EXYNOS4_CLKSRC_DMC;
	exynos4_clk_mout_mpll.reg_src.shift = 12;
	exynos4_clk_mout_mpll.reg_src.size = 1;

	exynos4_clk_aclk_200.sources = &exynos4212_clkset_aclk_200;
	exynos4_clk_aclk_200.reg_src.reg = EXYNOS4_CLKSRC_TOP1;
	exynos4_clk_aclk_200.reg_src.shift = 20;
	exynos4_clk_aclk_200.reg_src.size = 1;

	/* fimg2d (G2D) moved to the DMC IP gate on 4212. */
	exynos4_clk_fimg2d.enable = exynos4_clk_ip_dmc_ctrl;
	exynos4_clk_fimg2d.ctrlbit = (1 << 23);

	exynos4_clk_mout_g2d0.reg_src.reg = EXYNOS4_CLKSRC_DMC;
	exynos4_clk_mout_g2d0.reg_src.shift = 20;
	exynos4_clk_mout_g2d0.reg_src.size = 1;

	exynos4_clk_mout_g2d1.reg_src.reg = EXYNOS4_CLKSRC_DMC;
	exynos4_clk_mout_g2d1.reg_src.shift = 24;
	exynos4_clk_mout_g2d1.reg_src.size = 1;

	exynos4_clk_sclk_fimg2d.reg_src.reg = EXYNOS4_CLKSRC_DMC;
	exynos4_clk_sclk_fimg2d.reg_src.shift = 28;
	exynos4_clk_sclk_fimg2d.reg_src.size = 1;
	exynos4_clk_sclk_fimg2d.reg_div.reg = EXYNOS4_CLKDIV_DMC1;
	exynos4_clk_sclk_fimg2d.reg_div.shift = 0;
	exynos4_clk_sclk_fimg2d.reg_div.size = 4;

	/* 4212-specific EPLL/VPLL rate handlers. */
	exynos4_epll_ops.get_rate = exynos4212_epll_get_rate;
	exynos4_epll_ops.set_rate = exynos4212_epll_set_rate;
	exynos4_vpll_ops.get_rate = exynos4212_vpll_get_rate;
	exynos4_vpll_ops.set_rate = exynos4212_vpll_set_rate;

	for (ptr = 0; ptr < ARRAY_SIZE(exynos4212_sysclks); ptr++)
		s3c_register_clksrc(exynos4212_sysclks[ptr], 1);

	s3c_register_clksrc(exynos4212_clksrcs,
			    ARRAY_SIZE(exynos4212_clksrcs));

	s3c_register_clocks(exynos4212_init_clocks,
			    ARRAY_SIZE(exynos4212_init_clocks));

	/* Register, then immediately gate, the default-off clocks. */
	s3c_register_clocks(exynos4212_init_clocks_off,
			    ARRAY_SIZE(exynos4212_init_clocks_off));
	s3c_disable_clocks(exynos4212_init_clocks_off,
			   ARRAY_SIZE(exynos4212_init_clocks_off));

	s3c_register_clksrc(&exynos4212_clk_isp_srcs_div0, 1);
	s3c_register_clksrc(exynos4212_clk_isp_srcs,
			    ARRAY_SIZE(exynos4212_clk_isp_srcs));
	s3c_register_clocks(exynos4212_clk_isp,
			    ARRAY_SIZE(exynos4212_clk_isp));
	s3c_disable_clocks(&exynos4212_clk_isp_srcs[3].clk, 1);
	s3c_disable_clocks(&exynos4212_clk_isp_srcs[4].clk, 1);
	s3c_disable_clocks(&exynos4212_clk_isp_srcs[5].clk, 1);
	s3c_disable_clocks(&exynos4212_clk_isp_srcs[6].clk, 1);

	/* To save power,
	 * Disable CLKOUT of LEFTBUS, RIGHTBUS, TOP, DMC, CPU and ISP
	 */
	for (ptr = 0 ; ptr < ARRAY_SIZE(exynos4x12_cmu_config) ; ptr++) {
		tmp = __raw_readl(exynos4x12_cmu_config[ptr].reg);
		tmp &= ~(0x1 << 16);
		tmp |= (exynos4x12_cmu_config[ptr].val << 16);
		__raw_writel(tmp, exynos4x12_cmu_config[ptr].reg);
	}

	register_syscore_ops(&exynos4212_clock_syscore_ops);
}
/*
 * init_suspend_resume - register oprofile syscore (suspend/resume)
 * callbacks.
 */
static void __init init_suspend_resume(void)
{
	register_syscore_ops(&oprofile_syscore_ops);
}
/*
 * mvebu_clk_gating_setup - instantiate the (single) mvebu gatable
 * clock controller described by @desc and register its gates.
 * @np:   gating-controller device node
 * @desc: NULL-name-terminated table of gate descriptors
 *
 * Registers one clk gate per descriptor (parent falls back to the
 * controller's own input clock), exposes them via an OF clock
 * provider, and registers syscore ops for suspend/resume.
 *
 * Fixes vs. original: the gates array is allocated with overflow-safe
 * kcalloc(), and the global @ctrl is reset to NULL on the allocation
 * failure path so it does not dangle over freed memory (which would
 * also wrongly trip the "more than one device" guard on a retry).
 */
void __init mvebu_clk_gating_setup(struct device_node *np,
				   const struct clk_gating_soc_desc *desc)
{
	struct clk *clk;
	void __iomem *base;
	const char *default_parent = NULL;
	int n;

	/* Only one gating controller instance is supported. */
	if (ctrl) {
		pr_err("mvebu-clk-gating: cannot instantiate more than one gatable clock device\n");
		return;
	}

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

	/* The controller's own input clock is the default gate parent. */
	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		default_parent = __clk_get_name(clk);
		clk_put(clk);
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (WARN_ON(!ctrl))
		goto ctrl_out;

	/* lock must already be initialized */
	ctrl->lock = &ctrl_gating_lock;

	ctrl->base = base;

	/* Count, allocate, and register clock gates */
	for (n = 0; desc[n].name;)
		n++;

	ctrl->num_gates = n;
	/* kcalloc zero-fills and guards the count * size multiplication. */
	ctrl->gates = kcalloc(ctrl->num_gates, sizeof(struct clk *),
			      GFP_KERNEL);
	if (WARN_ON(!ctrl->gates))
		goto gates_out;

	for (n = 0; n < ctrl->num_gates; n++) {
		const char *parent =
			(desc[n].parent) ? desc[n].parent : default_parent;
		ctrl->gates[n] = clk_register_gate(NULL, desc[n].name, parent,
					desc[n].flags, base, desc[n].bit_idx,
					0, ctrl->lock);
		WARN_ON(IS_ERR(ctrl->gates[n]));
	}

	of_clk_add_provider(np, clk_gating_get_src, ctrl);

	register_syscore_ops(&clk_gate_syscore_ops);

	return;
gates_out:
	kfree(ctrl);
	ctrl = NULL;	/* don't leave the global pointing at freed memory */
ctrl_out:
	iounmap(base);
}
/*
 * jive_machine_init - board init for the Logitech Jive (S3C2412).
 *
 * Registers suspend handling, programs the per-port sleep (SLPCON)
 * states, then brings up NAND, SPI, USB, LCD, I2C and the remaining
 * platform devices. Register writes are order-sensitive.
 */
static void __init jive_machine_init(void)
{
	/* register system core operations for managing low level suspend */

	register_syscore_ops(&jive_pm_syscore_ops);

	/* write our sleep configurations for the IO. Pull down all unused
	 * IO, ensure that we have turned off all peripherals we do not
	 * need, and configure the ones we do need. */

	/* Port B sleep */

	__raw_writel(S3C2412_SLPCON_IN(0)   |
		     S3C2412_SLPCON_PULL(1) |
		     S3C2412_SLPCON_HIGH(2) |
		     S3C2412_SLPCON_PULL(3) |
		     S3C2412_SLPCON_PULL(4) |
		     S3C2412_SLPCON_PULL(5) |
		     S3C2412_SLPCON_PULL(6) |
		     S3C2412_SLPCON_HIGH(7) |
		     S3C2412_SLPCON_PULL(8) |
		     S3C2412_SLPCON_PULL(9) |
		     S3C2412_SLPCON_PULL(10), S3C2412_GPBSLPCON);

	/* Port C sleep */
	/* NOTE(review): pin 6 appears twice (LOW(6) | PULL(6)) — matches
	 * the shipped code; confirm against the S3C2412 SLPCON field
	 * encoding before "fixing". */

	__raw_writel(S3C2412_SLPCON_PULL(0) |
		     S3C2412_SLPCON_PULL(1) |
		     S3C2412_SLPCON_PULL(2) |
		     S3C2412_SLPCON_PULL(3) |
		     S3C2412_SLPCON_PULL(4) |
		     S3C2412_SLPCON_PULL(5) |
		     S3C2412_SLPCON_LOW(6)  |
		     S3C2412_SLPCON_PULL(6) |
		     S3C2412_SLPCON_PULL(7) |
		     S3C2412_SLPCON_PULL(8) |
		     S3C2412_SLPCON_PULL(9) |
		     S3C2412_SLPCON_PULL(10) |
		     S3C2412_SLPCON_PULL(11) |
		     S3C2412_SLPCON_PULL(12) |
		     S3C2412_SLPCON_PULL(13) |
		     S3C2412_SLPCON_PULL(14) |
		     S3C2412_SLPCON_PULL(15), S3C2412_GPCSLPCON);

	/* Port D sleep */

	__raw_writel(S3C2412_SLPCON_ALL_PULL, S3C2412_GPDSLPCON);

	/* Port F sleep */

	__raw_writel(S3C2412_SLPCON_LOW(0)  |
		     S3C2412_SLPCON_LOW(1)  |
		     S3C2412_SLPCON_LOW(2)  |
		     S3C2412_SLPCON_EINT(3) |
		     S3C2412_SLPCON_EINT(4) |
		     S3C2412_SLPCON_EINT(5) |
		     S3C2412_SLPCON_EINT(6) |
		     S3C2412_SLPCON_EINT(7), S3C2412_GPFSLPCON);

	/* Port G sleep */

	__raw_writel(S3C2412_SLPCON_IN(0)    |
		     S3C2412_SLPCON_IN(1)    |
		     S3C2412_SLPCON_IN(2)    |
		     S3C2412_SLPCON_IN(3)    |
		     S3C2412_SLPCON_IN(4)    |
		     S3C2412_SLPCON_IN(5)    |
		     S3C2412_SLPCON_IN(6)    |
		     S3C2412_SLPCON_IN(7)    |
		     S3C2412_SLPCON_PULL(8)  |
		     S3C2412_SLPCON_PULL(9)  |
		     S3C2412_SLPCON_IN(10)   |
		     S3C2412_SLPCON_PULL(11) |
		     S3C2412_SLPCON_PULL(12) |
		     S3C2412_SLPCON_PULL(13) |
		     S3C2412_SLPCON_IN(14)   |
		     S3C2412_SLPCON_PULL(15), S3C2412_GPGSLPCON);

	/* Port H sleep */

	__raw_writel(S3C2412_SLPCON_PULL(0) |
		     S3C2412_SLPCON_PULL(1) |
		     S3C2412_SLPCON_PULL(2) |
		     S3C2412_SLPCON_PULL(3) |
		     S3C2412_SLPCON_PULL(4) |
		     S3C2412_SLPCON_PULL(5) |
		     S3C2412_SLPCON_PULL(6) |
		     S3C2412_SLPCON_IN(7)   |
		     S3C2412_SLPCON_IN(8)   |
		     S3C2412_SLPCON_PULL(9) |
		     S3C2412_SLPCON_IN(10), S3C2412_GPHSLPCON);

	/* initialise the power management now we've setup everything. */

	s3c_pm_init();

	/** TODO - check that this is after the cmdline option! */
	s3c_nand_set_platdata(&jive_nand_info);

	/* initialise the spi */

	gpio_request(S3C2410_GPG(13), "lcm reset");
	gpio_direction_output(S3C2410_GPG(13), 0);

	gpio_request(S3C2410_GPB(7), "jive spi");
	gpio_direction_output(S3C2410_GPB(7), 1);

	/* Pulse these pins once to their resting states, then release. */
	gpio_request_one(S3C2410_GPB(6), GPIOF_OUT_INIT_LOW, NULL);
	gpio_free(S3C2410_GPB(6));

	gpio_request_one(S3C2410_GPG(8), GPIOF_OUT_INIT_HIGH, NULL);
	gpio_free(S3C2410_GPG(8));

	/* initialise the WM8750 spi */

	gpio_request(S3C2410_GPH(10), "jive wm8750 spi");
	gpio_direction_output(S3C2410_GPH(10), 1);

	/* Turn off suspend on both USB ports, and switch the
	 * selectable USB port to USB device mode. */

	s3c2410_modify_misccr(S3C2410_MISCCR_USBHOST |
			      S3C2410_MISCCR_USBSUSPND0 |
			      S3C2410_MISCCR_USBSUSPND1, 0x0);

	s3c24xx_udc_set_platdata(&jive_udc_cfg);
	s3c24xx_fb_set_platdata(&jive_lcd_config);

	spi_register_board_info(jive_spi_devs, ARRAY_SIZE(jive_spi_devs));

	s3c_i2c0_set_platdata(&jive_i2c_cfg);
	i2c_register_board_info(0, jive_i2c_devs, ARRAY_SIZE(jive_i2c_devs));

	pm_power_off = jive_power_off;

	platform_add_devices(jive_devices, ARRAY_SIZE(jive_devices));
}
/*
 * exynos4_pm_syscore_init - register Exynos4 PM syscore
 * (suspend/resume) callbacks. Always returns 0.
 */
static __init int exynos4_pm_syscore_init(void)
{
	register_syscore_ops(&exynos4_pm_syscore_ops);

	return 0;
}
/*
 * perf_ibs_pm_init - register IBS (Instruction-Based Sampling) syscore
 * (suspend/resume) callbacks.
 */
static void perf_ibs_pm_init(void)
{
	register_syscore_ops(&perf_ibs_syscore_ops);
}
/*
 * vfp_pm_init - register VFP syscore (suspend/resume) callbacks.
 */
static void vfp_pm_init(void)
{
	register_syscore_ops(&vfp_pm_syscore_ops);
}
/*
 * sa1100_gpio_init_devicefs - register SA1100 GPIO syscore
 * (suspend/resume) callbacks. Always returns 0.
 */
static int __init sa1100_gpio_init_devicefs(void)
{
	register_syscore_ops(&sa1100_gpio_syscore_ops);

	return 0;
}
/*
 * pm8038_probe - probe the PM8038 PMIC over SSBI.
 *
 * Reads and logs the chip revision registers and the restart reason,
 * publishes the chip data via drvdata, registers the MFD subdevices
 * and the PM syscore ops.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * done so far is undone via the goto ladder at the bottom.
 */
static int __devinit pm8038_probe(struct platform_device *pdev)
{
	const struct pm8038_platform_data *pdata = pdev->dev.platform_data;
	const char *revision_name = "unknown";
	struct pm8038 *pmic;
	enum pm8xxx_version version;
	int revision;
	int rc;
	u8 val;

	if (!pdata) {
		pr_err("missing platform data\n");
		return -EINVAL;
	}

	pmic = kzalloc(sizeof(struct pm8038), GFP_KERNEL);
	if (!pmic) {
		pr_err("Cannot alloc pm8038 struct\n");
		return -ENOMEM;
	}

	/* Read PMIC chip revision */
	rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV, &val, sizeof(val));
	if (rc) {
		pr_err("Failed to read hw rev reg %d:rc=%d\n", REG_HWREV, rc);
		goto err_read_rev;
	}
	pr_info("PMIC revision 1: PM8038 rev %02X\n", val);
	pmic->rev_registers = val;

	/* Read PMIC chip revision 2 */
	rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV_2, &val,
			   sizeof(val));
	if (rc) {
		pr_err("Failed to read hw rev 2 reg %d:rc=%d\n",
			REG_HWREV_2, rc);
		goto err_read_rev;
	}
	pr_info("PMIC revision 2: PM8038 rev %02X\n", val);
	/* Revision 2 occupies the second byte of the combined field. */
	pmic->rev_registers |= val << BITS_PER_BYTE;

	pmic->dev = &pdev->dev;
	pm8038_drvdata.pm_chip_data = pmic;
	platform_set_drvdata(pdev, &pm8038_drvdata);

	/* Print out human readable version and revision names. */
	version = pm8xxx_get_version(pmic->dev);
	if (version == PM8XXX_VERSION_8038) {
		revision = pm8xxx_get_revision(pmic->dev);
		if (revision >= 0 && revision < ARRAY_SIZE(pm8038_rev_names))
			revision_name = pm8038_rev_names[revision];
		pr_info("PMIC version: PM8038 ver %s\n", revision_name);
	} else {
		WARN_ON(version != PM8XXX_VERSION_8038);
	}

	/* Log human readable restart reason */
	rc = msm_ssbi_read(pdev->dev.parent, REG_PM8038_PON_CNTRL_3, &val, 1);
	if (rc) {
		pr_err("Cannot read restart reason rc=%d\n", rc);
		goto err_read_rev;
	}
	val &= PM8038_RESTART_REASON_MASK;
	pr_info("PMIC Restart Reason: %s\n", pm8038_restart_reason[val]);

	rc = pm8038_add_subdevices(pdata, pmic);
	if (rc) {
		pr_err("Cannot add subdevices rc=%d\n", rc);
		goto err;
	}

	pmic8038_chip = pmic;

	register_syscore_ops(&pm8038_pm);

	return 0;

err:
	/* Undo subdevice registration and drvdata publication. */
	mfd_remove_devices(pmic->dev);
	platform_set_drvdata(pdev, NULL);
	kfree(pmic->mfd_regulators);
	kfree(pmic->regulator_cdata);
err_read_rev:
	kfree(pmic);
	return rc;
}
/*
 * imx_gpcv2_irqchip_init - DT init for the i.MX GPCv2 interrupt
 * controller, stacked as a hierarchy child of the GIC.
 * @node:   GPC device node
 * @parent: parent (GIC) device node — required
 *
 * Maps the GPC registers, creates the hierarchical IRQ domain, masks
 * all interrupts for both cores, selects CORE0 as the wakeup CPU and
 * registers syscore ops for suspend/resume.
 *
 * Returns 0 on success or a negative errno; allocations and the
 * mapping are released on the failure paths.
 */
static int __init imx_gpcv2_irqchip_init(struct device_node *node,
					 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	struct gpcv2_irqchip_data *cd;
	int i;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to get parent domain\n", node->full_name);
		return -ENXIO;
	}

	cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL);
	if (!cd) {
		pr_err("kzalloc failed!\n");
		return -ENOMEM;
	}

	cd->gpc_base = of_iomap(node, 0);
	if (!cd->gpc_base) {
		pr_err("fsl-gpcv2: unable to map gpc registers\n");
		kfree(cd);
		return -ENOMEM;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
				node, &gpcv2_irqchip_data_domain_ops, cd);
	if (!domain) {
		iounmap(cd->gpc_base);
		kfree(cd);
		return -ENOMEM;
	}
	irq_set_default_host(domain);

	/* Initially mask all interrupts */
	for (i = 0; i < IMR_NUM; i++) {
		writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE0 + i * 4);
		writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE1 + i * 4);
		cd->wakeup_sources[i] = ~0;
	}

	/* Let CORE0 as the default CPU to wake up by GPC */
	cd->cpu2wakeup = GPC_IMR1_CORE0;

	/*
	 * Due to hardware design failure, need to make sure GPR
	 * interrupt(#32) is unmasked during RUN mode to avoid entering
	 * DSM by mistake.
	 */
	writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup);

	imx_gpcv2_instance = cd;
	register_syscore_ops(&imx_gpcv2_syscore_ops);

	return 0;
}