/*
 * am33xx_pm_suspend - enter the AM33xx low-power suspend state.
 *
 * Cycles the CPGMAC, USB OTG and GPMC hwmods through enable->idle so
 * they are left in a known idle state, puts the GFX clock/power
 * domains to sleep, gates the MPU module clock and hands off to the
 * SRAM suspend code via cpu_suspend().  On resume the MPU clock is
 * re-enabled and the resulting GFX power state is reported.
 *
 * Returns the cpu_suspend() result (0 on a clean suspend/resume);
 * the value is also cached in core_suspend_stat for later queries.
 */
static int am33xx_pm_suspend(void)
{
	int state, ret = 0;
	struct omap_hwmod *cpgmac_oh, *gpmc_oh, *usb_oh;

	cpgmac_oh = omap_hwmod_lookup("cpgmac0");
	usb_oh = omap_hwmod_lookup("usb_otg_hs");
	gpmc_oh = omap_hwmod_lookup("gpmc");

	/* Enable then idle each module so it parks in a known state. */
	omap_hwmod_enable(cpgmac_oh);
	omap_hwmod_enable(usb_oh);
	omap_hwmod_enable(gpmc_oh);

	omap_hwmod_idle(cpgmac_oh);
	omap_hwmod_idle(usb_oh);
	omap_hwmod_idle(gpmc_oh);

	if (gfx_l3_clkdm && gfx_l4ls_clkdm) {
		clkdm_sleep(gfx_l3_clkdm);
		clkdm_sleep(gfx_l4ls_clkdm);
	}

	/* Try to put GFX to sleep */
	if (gfx_pwrdm)
		pwrdm_set_next_pwrst(gfx_pwrdm, PWRDM_POWER_OFF);
	else
		pr_err("Could not program GFX to low power state\n");

	/*
	 * Gate (0x0) the MPU module clock around the actual suspend
	 * entry, then re-enable it (0x2) on resume.
	 */
	writel(0x0, AM33XX_CM_MPU_MPU_CLKCTRL);
	ret = cpu_suspend(0, am33xx_do_sram_idle);
	writel(0x2, AM33XX_CM_MPU_MPU_CLKCTRL);

	if (gfx_pwrdm) {
		state = pwrdm_read_pwrst(gfx_pwrdm);
		if (state != PWRDM_POWER_OFF)
			pr_err("GFX domain did not transition to low power state\n");
		else
			pr_info("GFX domain entered low power state\n");
	}

	/* XXX: Why do we need to wakeup the clockdomains? */
	if(gfx_l3_clkdm && gfx_l4ls_clkdm) {
		clkdm_wakeup(gfx_l3_clkdm);
		clkdm_wakeup(gfx_l4ls_clkdm);
	}

	core_suspend_stat = ret;

	return ret;
}
/*
 * omap_clkevt_suspend - idle the clockevent GP timer hwmod on suspend.
 * @unused: clockevent device (not used)
 *
 * Does nothing when no clockevent hwmod has been registered.
 */
static void omap_clkevt_suspend(struct clock_event_device *unused)
{
	if (clockevent_gpt_hwmod)
		omap_hwmod_idle(clockevent_gpt_hwmod);
}
/*
 * Setup free-running counter for clocksource.
 *
 * Looks up the 32 kHz counter hwmod (via DT when populated), maps its
 * registers, enables the module and registers it as a clocksource.
 *
 * Returns 0 on success or a negative error code.  Fix: the DT node
 * reference obtained from omap_get_timer_dt() was previously leaked on
 * the "ti,hwmods" read failure and hwmod-lookup failure paths, and the
 * of_property_read_string_index() return value was ignored (on failure
 * oh_name silently kept its default).
 */
static int __init __maybe_unused omap2_sync32k_clocksource_init(void)
{
	int ret;
	struct device_node *np = NULL;
	struct omap_hwmod *oh;
	void __iomem *vbase;
	const char *oh_name = "counter_32k";

	/*
	 * If device-tree is present, then search the DT blob
	 * to see if the 32kHz counter is supported.
	 */
	if (of_have_populated_dt()) {
		np = omap_get_timer_dt(omap_counter_match, NULL);
		if (!np)
			return -ENODEV;

		/* Check the return value so a failed read is not missed. */
		ret = of_property_read_string_index(np, "ti,hwmods", 0,
						    &oh_name);
		if (ret < 0 || !oh_name) {
			of_node_put(np);
			return -ENODEV;
		}
	}

	/*
	 * First check hwmod data is available for sync32k counter
	 */
	oh = omap_hwmod_lookup(oh_name);
	if (!oh || oh->slaves_cnt == 0) {
		of_node_put(np);	/* of_node_put(NULL) is a no-op */
		return -ENODEV;
	}

	omap_hwmod_setup_one(oh_name);

	if (np) {
		vbase = of_iomap(np, 0);
		of_node_put(np);
	} else {
		vbase = omap_hwmod_get_mpu_rt_va(oh);
	}

	if (!vbase) {
		pr_warn("%s: failed to get counter_32k resource\n", __func__);
		return -ENXIO;
	}

	ret = omap_hwmod_enable(oh);
	if (ret) {
		pr_warn("%s: failed to enable counter_32k module (%d)\n",
			__func__, ret);
		return ret;
	}

	ret = omap_init_clocksource_32k(vbase);
	if (ret) {
		pr_warn("%s: failed to initialize counter_32k as a clocksource (%d)\n",
			__func__, ret);
		/* Roll the module back to idle on registration failure. */
		omap_hwmod_idle(oh);
	}

	return ret;
}
/*
 * ti_sysc_idle_module - ti-sysc interconnect glue to idle a hwmod.
 * @dev: device (unused here)
 * @cookie: ti-sysc cookie whose data member carries the hwmod
 *
 * Returns -EINVAL when no hwmod is attached, otherwise the result of
 * omap_hwmod_idle().
 */
static int ti_sysc_idle_module(struct device *dev,
			       const struct ti_sysc_cookie *cookie)
{
	return cookie->data ? omap_hwmod_idle(cookie->data) : -EINVAL;
}
/* Idle GP timer 10 (used by the Espresso10 LCD) if it can be found. */
static void espresso10_lcd_set_gptimer_idle(void)
{
	struct omap_hwmod *oh;

	pr_debug("espresso10_lcd_set_gptimer_idle\n");

	oh = omap_hwmod_lookup("timer10");
	if (unlikely(!oh))
		return;

	omap_hwmod_idle(oh);
}
/**
 * _omap_device_idle_hwmods - call omap_hwmod_idle() on all hwmods
 * @od: struct omap_device *od
 *
 * Idle all underlying hwmods.  Returns 0 on success, or the first
 * error code returned by omap_hwmod_idle() (all hwmods are still
 * attempted).  Fix: the previous code OR-ed the return values
 * together; OR-combining two distinct negative errno values produces
 * a meaningless code.
 */
static int _omap_device_idle_hwmods(struct omap_device *od)
{
	int ret = 0;
	int i;

	for (i = 0; i < od->hwmods_cnt; i++) {
		int r = omap_hwmod_idle(od->hwmods[i]);

		/* Remember the first failure; keep idling the rest. */
		if (r && !ret)
			ret = r;
	}

	return ret;
}
/*
 * omap_clkevt_suspend - idle the clockevent GP timer hwmod on suspend.
 * @unused: clockevent device (not used)
 *
 * Looks the hwmod up by the clockevent's registered name; silently
 * does nothing when the lookup fails.
 */
static void omap_clkevt_suspend(struct clock_event_device *unused)
{
	struct omap_hwmod *oh = omap_hwmod_lookup(clockevent_gpt.name);

	if (oh)
		omap_hwmod_idle(oh);
}
/**
 * omap_device_idle_hwmods - call omap_hwmod_idle() on all hwmods
 * @od: struct omap_device *od
 *
 * Idle all underlying hwmods. Returns 0.
 */
int omap_device_idle_hwmods(struct omap_device *od)
{
	int i = 0;

	while (i < od->hwmods_cnt)
		omap_hwmod_idle(od->hwmods[i++]);

	/* XXX pass along return value here? */
	return 0;
}
/** * omap_device_idle_hwmods - call omap_hwmod_idle() on all hwmods * @od: struct omap_device *od * * Idle all underlying hwmods. Returns 0. */ int omap_device_idle_hwmods(struct omap_device *od) { struct omap_hwmod *oh; int i; for (i = 0, oh = *od->hwmods; i < od->hwmods_cnt; i++, oh++) omap_hwmod_idle(oh); /* XXX pass along return value here? */ return 0; }
/*
 * omap2_gptimer_clksrc_suspend - park the GP-timer clocksource.
 * @unused: clocksource (not used)
 *
 * Snapshots the running counter (so it can be restored on resume)
 * BEFORE idling the timer's hwmod — the order matters.
 */
static void omap2_gptimer_clksrc_suspend(struct clocksource *unused)
{
	struct omap_hwmod *oh;

	omap2_gptimer_clksrc_load =
		__omap_dm_timer_read_counter(&clksrc, OMAP_TIMER_NONPOSTED);

	oh = omap_hwmod_lookup(clocksource_gpt.name);
	if (oh)
		omap_hwmod_idle(oh);
}
/*
 * save_secure_all - ask the secure world to save all secure context.
 *
 * The l3_main_3 hwmod is enabled for the duration of the secure
 * dispatcher call and idled again afterwards.  A failed save is only
 * reported; there is no recovery path here.
 */
static void save_secure_all(void)
{
	u32 status;

	omap_hwmod_enable(l3_main_3_oh);

	status = omap_secure_dispatcher(secure_hal_save_all_api_index,
					FLAG_START_CRITICAL,
					1, omap_secure_ram_mempool_base(),
					0, 0, 0);

	omap_hwmod_idle(l3_main_3_oh);

	if (status != API_HAL_RET_VALUE_OK)
		pr_err("Secure all context save failed\n");
}
static int omap2_disable_wdt(struct omap_hwmod *oh, void *unused) { void __iomem *base; int ret; if (!oh) { pr_err("%s: Could not look up wdtimer_hwmod\n", __func__); return -EINVAL; } base = omap_hwmod_get_mpu_rt_va(oh); if (!base) { pr_err("%s: Could not get the base address for %s\n", oh->name, __func__); return -EINVAL; } /* Enable the clocks before accessing the WDT registers */ ret = omap_hwmod_enable(oh); if (ret) { pr_err("%s: Could not enable clocks for %s\n", oh->name, __func__); return ret; } /* sequence required to disable watchdog */ __raw_writel(0xAAAA, base + OMAP_WDT_SPR); while (__raw_readl(base + OMAP_WDT_WPS) & 0x10) cpu_relax(); __raw_writel(0x5555, base + OMAP_WDT_SPR); while (__raw_readl(base + OMAP_WDT_WPS) & 0x10) cpu_relax(); ret = omap_hwmod_idle(oh); if (ret) pr_err("%s: Could not disable clocks for %s\n", oh->name, __func__); return ret; }
/* Setup free-running counter for clocksource */
static int __init omap2_sync32k_clocksource_init(void)
{
	const char *oh_name = "counter_32k";
	struct omap_hwmod *oh;
	void __iomem *vbase;
	int ret;

	/* Bail out early unless hwmod data exists for the 32k counter. */
	oh = omap_hwmod_lookup(oh_name);
	if (!oh || oh->slaves_cnt == 0)
		return -ENODEV;

	omap_hwmod_setup_one(oh_name);

	vbase = omap_hwmod_get_mpu_rt_va(oh);
	if (!vbase) {
		pr_warn("%s: failed to get counter_32k resource\n", __func__);
		return -ENXIO;
	}

	ret = omap_hwmod_enable(oh);
	if (ret) {
		pr_warn("%s: failed to enable counter_32k module (%d)\n",
			__func__, ret);
		return ret;
	}

	ret = omap_init_clocksource_32k(vbase);
	if (ret) {
		pr_warn("%s: failed to initialize counter_32k as a clocksource (%d)\n",
			__func__, ret);
		/* Put the module back to idle since registration failed. */
		omap_hwmod_idle(oh);
	}

	return ret;
}
static int _omap_mstandby_pm_notifier(struct notifier_block *self, unsigned long action, void *dev) { struct omap_hwmod_list *oh_list_item = NULL; struct platform_device *pdev; struct omap_device *od; switch (action) { case PM_POST_SUSPEND: list_for_each_entry(oh_list_item, omap_hwmod_force_mstandby_list_get(), oh_list) { pdev = to_platform_device( omap_device_get_by_hwmod_name( oh_list_item->oh->name)); od = to_omap_device(pdev); if (od && od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) { omap_hwmod_enable(oh_list_item->oh); omap_hwmod_idle(oh_list_item->oh); } } }
/*
 * am33xx_pm_suspend - enter AM33xx suspend (DeepSleep0 or standby).
 *
 * Idles the USB OTG / GPMC / GPIO1 hwmods as appropriate for the
 * target state, records the state for the SRAM sleep code via
 * suspend_cfg_param_list, puts the GFX clock/power domains to sleep,
 * gates the MPU clock around cpu_suspend(), and undoes the module
 * state changes on resume.
 *
 * State handling differs between standby and DS0:
 *  - USB stays enabled in standby so USB remote wakeup works.
 *  - GPIO1 (WKUP domain) stays enabled in standby, and always on
 *    EVM-SK (its GPIO0 pin controls VTP in suspend).
 *  - The touchscreen (ADC_TSC) module clock is force-enabled for
 *    standby so it can wake the system, and gated again on resume.
 *
 * Returns the result of am33xx_verify_lp_state() applied to the
 * cpu_suspend() outcome.
 */
static int am33xx_pm_suspend(void)
{
	int state, ret = 0;
	struct omap_hwmod *gpmc_oh, *usb_oh, *gpio1_oh;

	usb_oh		= omap_hwmod_lookup("usb_otg_hs");
	gpmc_oh		= omap_hwmod_lookup("gpmc");
	gpio1_oh	= omap_hwmod_lookup("gpio1");	/* WKUP domain GPIO */

	omap_hwmod_enable(usb_oh);
	omap_hwmod_enable(gpmc_oh);

	/*
	 * Keep USB module enabled during standby
	 * to enable USB remote wakeup
	 * Note: This will result in hard-coding USB state
	 * during standby
	 */
	if (suspend_state != PM_SUSPEND_STANDBY)
		omap_hwmod_idle(usb_oh);

	omap_hwmod_idle(gpmc_oh);

	/*
	 * Disable the GPIO module. This ensure that
	 * only sWAKEUP interrupts to Cortex-M3 get generated
	 *
	 * XXX: EVM_SK uses a GPIO0 pin for VTP control
	 * in suspend and hence we can't do this for EVM_SK
	 * alone. The side-effect of this is that GPIO wakeup
	 * might have issues. Refer to commit 672639b for the
	 * details
	 */
	/*
	 * Keep GPIO0 module enabled during standby to
	 * support wakeup via GPIO0 keys.
	 */
	if ((suspend_cfg_param_list[EVM_ID] != EVM_SK) &&
			(suspend_state != PM_SUSPEND_STANDBY))
		omap_hwmod_idle(gpio1_oh);

	/*
	 * Update Suspend_State value to be used in sleep33xx.S to keep
	 * GPIO0 module enabled during standby for EVM-SK
	 */
	if (suspend_state == PM_SUSPEND_STANDBY)
		suspend_cfg_param_list[SUSPEND_STATE] = PM_STANDBY;
	else
		suspend_cfg_param_list[SUSPEND_STATE] = PM_DS0;

	/*
	 * Keep Touchscreen module enabled during standby
	 * to enable wakeup from standby.
	 */
	if (suspend_state == PM_SUSPEND_STANDBY)
		writel(0x2, AM33XX_CM_WKUP_ADC_TSC_CLKCTRL);

	if (gfx_l3_clkdm && gfx_l4ls_clkdm) {
		clkdm_sleep(gfx_l3_clkdm);
		clkdm_sleep(gfx_l4ls_clkdm);
	}

	/* Try to put GFX to sleep */
	if (gfx_pwrdm)
		pwrdm_set_next_pwrst(gfx_pwrdm, PWRDM_POWER_OFF);
	else
		pr_err("Could not program GFX to low power state\n");

	omap3_intc_suspend();

	/*
	 * Gate (0x0) the MPU module clock across the suspend entry and
	 * re-enable it (0x2) on resume.
	 */
	writel(0x0, AM33XX_CM_MPU_MPU_CLKCTRL);
	ret = cpu_suspend(0, am33xx_do_sram_idle);
	writel(0x2, AM33XX_CM_MPU_MPU_CLKCTRL);

	if (gfx_pwrdm) {
		state = pwrdm_read_pwrst(gfx_pwrdm);
		if (state != PWRDM_POWER_OFF)
			pr_err("GFX domain did not transition to low power state\n");
		else
			pr_info("GFX domain entered low power state\n");
	}

	/* XXX: Why do we need to wakeup the clockdomains? */
	if(gfx_l3_clkdm && gfx_l4ls_clkdm) {
		clkdm_wakeup(gfx_l3_clkdm);
		clkdm_wakeup(gfx_l4ls_clkdm);
	}

	/*
	 * Touchscreen module was enabled during standby
	 * Disable it here.
	 */
	if (suspend_state == PM_SUSPEND_STANDBY)
		writel(0x0, AM33XX_CM_WKUP_ADC_TSC_CLKCTRL);

	/*
	 * Put USB module to idle on resume from standby
	 */
	if (suspend_state == PM_SUSPEND_STANDBY)
		omap_hwmod_idle(usb_oh);

	ret = am33xx_verify_lp_state(ret);

	/*
	 * Enable the GPIO module. Once the driver is
	 * fully adapted to runtime PM this will go away
	 */
	/*
	 * During standby, GPIO was not disabled. Hence no
	 * need to enable it here.
	 */
	if ((suspend_cfg_param_list[EVM_ID] != EVM_SK) &&
			(suspend_state != PM_SUSPEND_STANDBY))
		omap_hwmod_enable(gpio1_oh);

	return ret;
}
static int serial_omap_probe(struct platform_device *pdev) { struct uart_omap_port *up = NULL; struct resource *mem, *irq, *dma_tx, *dma_rx; struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data; struct omap_device *od; int ret = -ENOSPC; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "no mem resource?\n"); return -ENODEV; } irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq) { dev_err(&pdev->dev, "no irq resource?\n"); return -ENODEV; } if (!request_mem_region(mem->start, (mem->end - mem->start) + 1, pdev->dev.driver->name)) { dev_err(&pdev->dev, "memory region already claimed\n"); return -EBUSY; } dma_rx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); if (!dma_rx) { ret = -EINVAL; goto err; } dma_tx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); if (!dma_tx) { ret = -EINVAL; goto err; } up = kzalloc(sizeof(*up), GFP_KERNEL); if (up == NULL) { ret = -ENOMEM; goto do_release_region; } sprintf(up->name, "OMAP UART%d", pdev->id); up->pdev = pdev; up->port.dev = &pdev->dev; up->port.type = PORT_OMAP; up->port.iotype = UPIO_MEM; up->port.irq = irq->start; up->port.regshift = 2; up->port.fifosize = 64; up->port.ops = &serial_omap_pops; up->port.line = pdev->id; up->port.mapbase = mem->start; up->port.membase = ioremap(mem->start, mem->end - mem->start); if (!up->port.membase) { dev_err(&pdev->dev, "can't ioremap UART\n"); ret = -ENOMEM; goto err1; } up->port.flags = omap_up_info->flags; up->port.uartclk = omap_up_info->uartclk; up->uart_dma.uart_base = mem->start; up->errata = omap_up_info->errata; up->enable_wakeup = omap_up_info->enable_wakeup; up->wer = omap_up_info->wer; up->chk_wakeup = omap_up_info->chk_wakeup; up->wake_peer = omap_up_info->wake_peer; up->rts_mux_driver_control = omap_up_info->rts_mux_driver_control; up->rts_pullup_in_suspend = 0; up->wer_restore = 0; if (omap_up_info->use_dma) { up->uart_dma.uart_dma_tx = dma_tx->start; up->uart_dma.uart_dma_rx = 
dma_rx->start; up->use_dma = 1; up->uart_dma.rx_buf_size = omap_up_info->dma_rx_buf_size; up->uart_dma.rx_timeout = omap_up_info->dma_rx_timeout; up->uart_dma.rx_poll_rate = omap_up_info->dma_rx_poll_rate; spin_lock_init(&(up->uart_dma.tx_lock)); spin_lock_init(&(up->uart_dma.rx_lock)); up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE; up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE; } pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, omap_up_info->auto_sus_timeout); if (device_may_wakeup(&pdev->dev)) pm_runtime_enable(&pdev->dev); pm_runtime_irq_safe(&pdev->dev); if (omap_up_info->console_uart) { od = to_omap_device(up->pdev); omap_hwmod_idle(od->hwmods[0]); serial_omap_port_enable(up); serial_omap_port_disable(up); } ui[pdev->id] = up; serial_omap_add_console_port(up); ret = uart_add_one_port(&serial_omap_reg, &up->port); if (ret != 0) goto err1; dev_set_drvdata(&pdev->dev, up); platform_set_drvdata(pdev, up); return 0; err: dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n", pdev->id, __func__, ret); err1: kfree(up); do_release_region: release_mem_region(mem->start, (mem->end - mem->start) + 1); return ret; }
/* omap_device callback: idle the UART's single underlying hwmod. */
static int uart_idle_hwmod(struct omap_device *od)
{
	struct omap_hwmod *oh = od->hwmods[0];

	omap_hwmod_idle(oh);

	return 0;
}