static int _od_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_device *od = to_omap_device(pdev);
	int ret;

	/* Don't attempt late suspend on a driver that is not bound */
	if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER)
		return 0;

	ret = pm_generic_suspend_noirq(dev);

	if (!ret && !pm_runtime_status_suspended(dev)) {
		if (pm_generic_runtime_suspend(dev) == 0) {
			pm_runtime_set_suspended(dev);
			omap_device_idle(pdev);
			od->flags |= OMAP_DEVICE_SUSPENDED;
		}
	}

	return ret;
}
/**
 * omap_device_late_idle - idle devices without drivers
 * @dev: struct device * associated with omap_device
 * @data: unused
 *
 * Check the driver bound status of this device, and idle it
 * if there is no driver attached.
 */
static int __init omap_device_late_idle(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_device *od = to_omap_device(pdev);

	if (!od)
		return 0;

	/*
	 * If omap_device state is enabled, but has no driver bound,
	 * idle it.
	 */
	if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
		if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
			dev_warn(dev, "%s: enabled but no driver. Idling\n",
				 __func__);
			omap_device_idle(pdev);
		}
	}

	return 0;
}
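/*
 * Illustrative sketch only: omap_device_late_idle() is typically run over
 * every platform device from a late initcall, roughly as below.  The
 * initcall function name is an assumption, not taken from this file.
 */
static int __init omap_device_late_init(void)
{
	bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle);
	return 0;
}
late_initcall(omap_device_late_init);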
static int _od_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_device *od = to_omap_device(pdev);

	if (od->flags & OMAP_DEVICE_SUSPENDED) {
		od->flags &= ~OMAP_DEVICE_SUSPENDED;
		omap_device_enable(pdev);
		/*
		 * XXX: we run before core runtime pm has resumed itself. At
		 * this point in time, we just restore the runtime pm state;
		 * given the symmetric operations in resume, we do not expect
		 * this to fail. If it does, something changed in the core
		 * runtime_pm framework OR some device driver messed things
		 * up, hence, WARN.
		 */
		WARN(pm_runtime_set_active(dev),
		     "Could not set %s runtime state active\n", dev_name(dev));
		pm_generic_runtime_resume(dev);
	}

	return pm_generic_resume_noirq(dev);
}
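/*
 * Illustrative sketch only: _od_suspend_noirq() and _od_resume_noirq() are
 * normally exposed through a dev_pm_domain installed on each omap_device's
 * platform device.  The trimmed structure below shows just that wiring;
 * the real domain also fills the runtime PM callbacks, which are omitted
 * here.
 */
static struct dev_pm_domain omap_device_pm_domain = {
	.ops = {
		USE_PLATFORM_PM_SLEEP_OPS
		.suspend_noirq = _od_suspend_noirq,
		.resume_noirq = _od_resume_noirq,
	}
};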
/**
 * omap_device_align_pm_lat - activate/deactivate device to match wakeup lat lim
 * @pdev: struct platform_device * of the omap_device to adjust
 * @new_wakeup_lat_limit: new maximum wakeup latency limit for the device
 *
 * When a device's maximum wakeup latency limit changes, call some of
 * the .activate_func or .deactivate_func function pointers in the
 * omap_device's pm_lats array to ensure that the device's maximum
 * wakeup latency is less than or equal to the new latency limit.
 * Intended to be called by OMAP PM code whenever a device's maximum
 * wakeup latency limit changes (e.g., via
 * omap_pm_set_dev_wakeup_lat()).  Returns 0 if nothing needs to be
 * done (e.g., if the omap_device is not currently idle, or if the
 * wakeup latency already satisfies the new limit), or passes along
 * the return value of _omap_device_deactivate() or
 * _omap_device_activate().
 */
int omap_device_align_pm_lat(struct platform_device *pdev,
			     u32 new_wakeup_lat_limit)
{
	int ret = -EINVAL;
	struct omap_device *od;

	od = to_omap_device(pdev);

	if (new_wakeup_lat_limit == od->dev_wakeup_lat)
		return 0;

	od->_dev_wakeup_lat_limit = new_wakeup_lat_limit;

	if (od->_state != OMAP_DEVICE_STATE_IDLE)
		return 0;
	else if (new_wakeup_lat_limit > od->dev_wakeup_lat)
		ret = _omap_device_deactivate(od, USE_WAKEUP_LAT);
	else if (new_wakeup_lat_limit < od->dev_wakeup_lat)
		ret = _omap_device_activate(od, USE_WAKEUP_LAT);

	return ret;
}
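/*
 * Illustrative example only: how OMAP PM code might use
 * omap_device_align_pm_lat() after a device's wakeup latency constraint
 * changes.  The wrapper name and the example limit value are made up.
 */
static void example_update_wakeup_lat(struct platform_device *pdev)
{
	/* ask the omap_device layer to honour a tighter wakeup latency limit */
	if (omap_device_align_pm_lat(pdev, 10))
		dev_warn(&pdev->dev,
			 "could not align to new wakeup latency limit\n");
}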
/* TBD: Will be removed once we have an irq-chaining mechanism */
static bool omap_uart_chk_wakeup(struct platform_device *pdev)
{
	struct omap_uart_port_info *up = pdev->dev.platform_data;
	struct omap_device *od;
	u32 wkst = 0;
	bool ret = false;

	od = to_omap_device(pdev);

	if (omap_hwmod_pad_get_wakeup_status(od->hwmods[0]))
		ret = true;

	if (up->wk_st && up->wk_en && up->wk_mask) {
		/* Check for normal UART wakeup (and clear it) */
		wkst = __raw_readl(up->wk_st) & up->wk_mask;
		if (wkst) {
			__raw_writel(wkst, up->wk_st);
			ret = true;
		}
	}

	return ret;
}
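/*
 * Illustrative example only: omap_uart_chk_wakeup() is handed to the serial
 * driver through struct omap_uart_port_info platform data (see
 * serial_omap_probe() below, which copies info->chk_wakeup).  The helper
 * name here is made up.
 */
static void __init example_omap_serial_fill_port_info(struct omap_uart_port_info *info)
{
	info->chk_wakeup = omap_uart_chk_wakeup;
}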
static int _omap_mstandby_pm_notifier(struct notifier_block *self,
				      unsigned long action, void *dev)
{
	struct omap_hwmod_list *oh_list_item = NULL;
	struct platform_device *pdev;
	struct omap_device *od;

	switch (action) {
	case PM_POST_SUSPEND:
		list_for_each_entry(oh_list_item,
				    omap_hwmod_force_mstandby_list_get(),
				    oh_list) {
			pdev = to_platform_device(
				omap_device_get_by_hwmod_name(
					oh_list_item->oh->name));
			od = to_omap_device(pdev);
			if (od &&
			    od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
				omap_hwmod_enable(oh_list_item->oh);
				omap_hwmod_idle(oh_list_item->oh);
			}
		}
	}

	return NOTIFY_DONE;
}
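/*
 * Illustrative sketch only: the PM notifier above would be registered once
 * at init time, roughly as below.  The notifier_block and init function
 * names are assumptions.
 */
static struct notifier_block omap_mstandby_pm_nb = {
	.notifier_call = _omap_mstandby_pm_notifier,
};

static int __init omap_mstandby_pm_init(void)
{
	return register_pm_notifier(&omap_mstandby_pm_nb);
}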
static int _omap_device_notifier_call(struct notifier_block *nb,
				      unsigned long event, void *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_device *od;

	switch (event) {
	case BUS_NOTIFY_DEL_DEVICE:
		if (pdev->archdata.od)
			omap_device_delete(pdev->archdata.od);
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		if (pdev->dev.of_node)
			omap_device_build_from_dt(pdev);
		/* fall through */
	default:
		od = to_omap_device(pdev);
		if (od)
			od->_driver_status = event;
	}

	return NOTIFY_DONE;
}
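/*
 * Illustrative sketch only: _omap_device_notifier_call() is hooked onto the
 * platform bus so the omap_device layer sees driver bind/unbind and device
 * add/delete events.  The notifier_block and init function names are
 * assumptions.
 */
static struct notifier_block platform_nb = {
	.notifier_call = _omap_device_notifier_call,
};

static int __init omap_device_init(void)
{
	bus_register_notifier(&platform_bus_type, &platform_nb);
	return 0;
}
core_initcall(omap_device_init);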
/**
 * omap_device_shutdown - shut down an omap_device
 * @pdev: struct platform_device * of the omap_device to shut down
 *
 * Shut down the omap_device associated with @pdev by calling all
 * .deactivate_func() entries in the omap_device's pm_lats table and
 * then shutting down all of the underlying omap_hwmods.  Used when a
 * device is being "removed" or a device driver is being unloaded.
 * Returns -EINVAL if the omap_device is not currently enabled or
 * idle, or passes along the return value of _omap_device_deactivate().
 */
int omap_device_shutdown(struct platform_device *pdev)
{
	int ret, i;
	struct omap_device *od;

	od = to_omap_device(pdev);

	if (od->_state != OMAP_DEVICE_STATE_ENABLED &&
	    od->_state != OMAP_DEVICE_STATE_IDLE) {
		WARN(1, "omap_device: %s.%d: %s() called from invalid state %d\n",
		     od->pdev.name, od->pdev.id, __func__, od->_state);
		return -EINVAL;
	}

	ret = _omap_device_deactivate(od, IGNORE_WAKEUP_LAT);

	for (i = 0; i < od->hwmods_cnt; i++)
		omap_hwmod_shutdown(od->hwmods[i]);

	od->_state = OMAP_DEVICE_STATE_SHUTDOWN;

	return ret;
}
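/*
 * Illustrative example only: a driver's .shutdown hook (or arch shutdown
 * code) could use omap_device_shutdown() to power down the underlying
 * hwmods.  The wrapper name is made up for the example.
 */
static void example_driver_shutdown(struct platform_device *pdev)
{
	if (omap_device_shutdown(pdev))
		dev_warn(&pdev->dev, "omap_device shutdown failed\n");
}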
static int serial_omap_probe(struct platform_device *pdev)
{
	struct uart_omap_port *up = NULL;
	struct resource *mem, *irq, *dma_tx, *dma_rx;
	struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
	struct omap_device *od;
	int ret = -ENOSPC;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -ENODEV;
	}

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}

	if (!request_mem_region(mem->start, resource_size(mem),
				pdev->dev.driver->name)) {
		dev_err(&pdev->dev, "memory region already claimed\n");
		return -EBUSY;
	}

	dma_rx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
	if (!dma_rx) {
		ret = -EINVAL;
		goto err;
	}

	dma_tx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
	if (!dma_tx) {
		ret = -EINVAL;
		goto err;
	}

	up = kzalloc(sizeof(*up), GFP_KERNEL);
	if (up == NULL) {
		ret = -ENOMEM;
		goto do_release_region;
	}

	sprintf(up->name, "OMAP UART%d", pdev->id);
	up->pdev = pdev;
	up->port.dev = &pdev->dev;
	up->port.type = PORT_OMAP;
	up->port.iotype = UPIO_MEM;
	up->port.irq = irq->start;

	up->port.regshift = 2;
	up->port.fifosize = 64;
	up->port.ops = &serial_omap_pops;
	up->port.line = pdev->id;

	up->port.mapbase = mem->start;
	up->port.membase = ioremap(mem->start, resource_size(mem));
	if (!up->port.membase) {
		dev_err(&pdev->dev, "can't ioremap UART\n");
		ret = -ENOMEM;
		goto err1;
	}

	up->port.flags = omap_up_info->flags;
	up->port.uartclk = omap_up_info->uartclk;
	up->uart_dma.uart_base = mem->start;
	up->errata = omap_up_info->errata;
	up->enable_wakeup = omap_up_info->enable_wakeup;
	up->wer = omap_up_info->wer;
	up->chk_wakeup = omap_up_info->chk_wakeup;
	up->wake_peer = omap_up_info->wake_peer;
	up->rts_mux_driver_control = omap_up_info->rts_mux_driver_control;
	up->rts_pullup_in_suspend = 0;
	up->wer_restore = 0;

	if (omap_up_info->use_dma) {
		up->uart_dma.uart_dma_tx = dma_tx->start;
		up->uart_dma.uart_dma_rx = dma_rx->start;
		up->use_dma = 1;
		up->uart_dma.rx_buf_size = omap_up_info->dma_rx_buf_size;
		up->uart_dma.rx_timeout = omap_up_info->dma_rx_timeout;
		up->uart_dma.rx_poll_rate = omap_up_info->dma_rx_poll_rate;
		spin_lock_init(&(up->uart_dma.tx_lock));
		spin_lock_init(&(up->uart_dma.rx_lock));
		up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
		up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
	}

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 omap_up_info->auto_sus_timeout);

	if (device_may_wakeup(&pdev->dev))
		pm_runtime_enable(&pdev->dev);

	pm_runtime_irq_safe(&pdev->dev);

	if (omap_up_info->console_uart) {
		od = to_omap_device(up->pdev);
		omap_hwmod_idle(od->hwmods[0]);
		serial_omap_port_enable(up);
		serial_omap_port_disable(up);
	}

	ui[pdev->id] = up;
	serial_omap_add_console_port(up);

	ret = uart_add_one_port(&serial_omap_reg, &up->port);
	if (ret != 0)
		goto err1;

	dev_set_drvdata(&pdev->dev, up);
	platform_set_drvdata(pdev, up);

	return 0;

err:
	dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
		pdev->id, __func__, ret);
err1:
	kfree(up);
do_release_region:
	release_mem_region(mem->start, resource_size(mem));
	return ret;
}
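/*
 * Illustrative sketch only: serial_omap_probe() is registered through a
 * platform_driver.  The driver name string is an assumption, and the real
 * driver also registers a uart_driver and fills .remove and .driver.pm,
 * which are omitted here.
 */
static struct platform_driver serial_omap_driver = {
	.probe	= serial_omap_probe,
	.driver	= {
		.name	= "omap_uart",
	},
};

static int __init serial_omap_init(void)
{
	return platform_driver_register(&serial_omap_driver);
}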
static struct omap_hwmod_mux_info * __init
omap_hwmod_mux_array_init(struct platform_device *pdev,
			  const struct omap_device_pad *bpads[],
			  int *nr_pads,
			  const enum usbhs_omap_port_mode *port_mode)
{
	struct omap_device_pad *pads;
	struct omap_hwmod_mux_info *hmux;
	struct omap_device *od;
	struct omap_hwmod *uhh_hwm;
	size_t npads;
	u32 i, k, j;

	for (i = 0, npads = 0; i < OMAP3_HS_USB_PORTS; i++)
		npads += nr_pads[i];

	pads = kmalloc(sizeof(struct omap_device_pad) * npads, GFP_KERNEL);
	if (!pads) {
		pr_err("%s: Could not allocate device mux entry\n", __func__);
		return NULL;
	}

	for (i = 0, k = 0; i < OMAP3_HS_USB_PORTS; i++)
		if (nr_pads[i]) {
			memcpy(pads + k, bpads[i],
			       sizeof(struct omap_device_pad) * nr_pads[i]);
			k += nr_pads[i];
		}

	hmux = omap_hwmod_mux_init(pads, npads);

	if (!pdev)
		goto end;

	od = to_omap_device(pdev);
	uhh_hwm = od->hwmods[0];
	if (!uhh_hwm)
		goto end;

	uhh_hwm->mux = hmux;

	for (i = 0, k = 0; i < OMAP3_HS_USB_PORTS; i++) {
		switch (port_mode[i]) {
		case OMAP_EHCI_PORT_MODE_PHY:
		case OMAP_EHCI_PORT_MODE_TLL:
		case OMAP_EHCI_PORT_MODE_HSIC:
			for (j = 0; j < nr_pads[i]; j++)
				omap_hwmod_pad_wakeup_handler(uhh_hwm, k + j,
							usbhs_wakeup_handler);
			break;
		case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
		case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
		case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
		case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
		case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
		case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
		case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
		case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
		case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
		case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
			for (j = 0; j < nr_pads[i]; j++)
				omap_hwmod_pad_wakeup_handler(uhh_hwm, k + j,
							usbhs_wakeup_handler);
			break;
		case OMAP_USBHS_PORT_MODE_UNUSED:
		default:
			break;
		}
		k += nr_pads[i];
	}

end:
	kfree(pads);
	return hmux;
}
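/*
 * Illustrative example only: a caller supplies one pad table pointer and
 * pad count per USB host port, plus the per-port modes; unused ports keep
 * a zero count.  The pad table contents and all names below are stand-ins,
 * not real board data.
 */
static const struct omap_device_pad example_port1_pads[] = {
	{
		.name	= "usbb1_ulpitll_clk.usbb1_ulpiphy_clk",
		.enable	= OMAP_PIN_INPUT_PULLDOWN | OMAP_MUX_MODE4,
	},
};

static void __init example_usbhs_mux_setup(struct platform_device *pdev,
				const enum usbhs_omap_port_mode *port_mode)
{
	const struct omap_device_pad *pads[OMAP3_HS_USB_PORTS] = {
		[0] = example_port1_pads,
	};
	int nr_pads[OMAP3_HS_USB_PORTS] = {
		[0] = ARRAY_SIZE(example_port1_pads),
	};

	omap_hwmod_mux_array_init(pdev, pads, nr_pads, port_mode);
}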