/**
 * dwc3_otg_sm_work - workqueue function.
 *
 * @w: Pointer to the dwc3 otg workqueue
 *
 * NOTE: After any change in phy->state,
 * we must reschedule the state machine.
 */
static void dwc3_otg_sm_work(struct work_struct *w)
{
	struct dwc3_otg *dotg = container_of(w, struct dwc3_otg, sm_work);
	struct usb_phy *phy = dotg->otg.phy;
	struct dwc3_charger *charger = dotg->charger;
	bool work = 0;

	/* Make sure the controller is powered before touching state */
	pm_runtime_resume(phy->dev);
	dev_dbg(phy->dev, "%s state\n", otg_state_string(phy->state));

	/* Check OTG state */
	switch (phy->state) {
	case OTG_STATE_UNDEFINED:
		dwc3_otg_init_sm(dotg);
		if (!dotg->psy) {
			dotg->psy = power_supply_get_by_name("usb");
			if (!dotg->psy)
				dev_err(phy->dev,
					"couldn't get usb power supply\n");
		}
		/* Switch to A or B-Device according to ID / BSV */
		if (!test_bit(ID, &dotg->inputs)) {
			dev_dbg(phy->dev, "!id\n");
			phy->state = OTG_STATE_A_IDLE;
			work = 1;
		} else if (test_bit(B_SESS_VLD, &dotg->inputs)) {
			dev_dbg(phy->dev, "b_sess_vld\n");
			phy->state = OTG_STATE_B_IDLE;
			work = 1;
		} else {
			/* Nothing attached: idle in B and allow low power */
			phy->state = OTG_STATE_B_IDLE;
			dev_dbg(phy->dev, "No device, trying to suspend\n");
			pm_runtime_put_sync(phy->dev);
		}
		break;
	case OTG_STATE_B_IDLE:
		if (!test_bit(ID, &dotg->inputs)) {
			dev_dbg(phy->dev, "!id\n");
			phy->state = OTG_STATE_A_IDLE;
			work = 1;
			if (charger) {
				/*
				 * Stop an in-flight detection, or clear a
				 * previously latched charger type.
				 */
				if (charger->chg_type == DWC3_INVALID_CHARGER)
					charger->start_detection(dotg->charger,
								false);
				else
					charger->chg_type =
							DWC3_INVALID_CHARGER;
			}
		} else if (test_bit(B_SESS_VLD, &dotg->inputs)) {
			dev_dbg(phy->dev, "b_sess_vld\n");
			if (charger) {
				/* Has charger been detected? If no detect it */
				switch (charger->chg_type) {
				case DWC3_DCP_CHARGER:
					/* Dedicated charger: draw max, sleep */
					dev_dbg(phy->dev, "lpm, DCP charger\n");
					dwc3_otg_set_power(phy,
							DWC3_IDEV_CHG_MAX);
					pm_runtime_put_sync(phy->dev);
					break;
				case DWC3_CDP_CHARGER:
					/* CDP: max current AND enumerate */
					dwc3_otg_set_power(phy,
							DWC3_IDEV_CHG_MAX);
					dwc3_otg_start_peripheral(&dotg->otg,
								1);
					phy->state = OTG_STATE_B_PERIPHERAL;
					work = 1;
					break;
				case DWC3_SDP_CHARGER:
					/* Standard port: just enumerate */
					dwc3_otg_start_peripheral(&dotg->otg,
								1);
					phy->state = OTG_STATE_B_PERIPHERAL;
					work = 1;
					break;
				default:
					/* Unknown type yet: kick detection */
					dev_dbg(phy->dev, "chg_det started\n");
					charger->start_detection(charger, true);
					break;
				}
			} else {
				/* no charger registered, start peripheral */
				if (dwc3_otg_start_peripheral(&dotg->otg, 1)) {
					/*
					 * Probably set_peripheral not called
					 * yet. We will re-try as soon as it
					 * will be called
					 */
					dev_err(phy->dev, "enter lpm as\n"
						"unable to start B-device\n");
					phy->state = OTG_STATE_UNDEFINED;
					pm_runtime_put_sync(phy->dev);
					return;
				}
			}
		} else {
			/* Session gone: stop detection, zero current, sleep */
			if (charger)
				charger->start_detection(dotg->charger, false);
			dwc3_otg_set_power(phy, 0);
			dev_dbg(phy->dev, "No device, trying to suspend\n");
			pm_runtime_put_sync(phy->dev);
		}
		break;
	case OTG_STATE_B_PERIPHERAL:
		if (!test_bit(B_SESS_VLD, &dotg->inputs) ||
				!test_bit(ID, &dotg->inputs)) {
			dev_dbg(phy->dev, "!id || !bsv\n");
			dwc3_otg_start_peripheral(&dotg->otg, 0);
			phy->state = OTG_STATE_B_IDLE;
			if (charger)
				charger->chg_type = DWC3_INVALID_CHARGER;
			work = 1;
		}
		break;
	case OTG_STATE_A_IDLE:
		/* Switch to A-Device */
		if (test_bit(ID, &dotg->inputs)) {
			dev_dbg(phy->dev, "id\n");
			phy->state = OTG_STATE_B_IDLE;
			work = 1;
		} else {
			phy->state = OTG_STATE_A_HOST;
			if (dwc3_otg_start_host(&dotg->otg, 1)) {
				/*
				 * Probably set_host was not called yet.
				 * We will re-try as soon as it will be called
				 */
				dev_dbg(phy->dev, "enter lpm as\n"
					"unable to start A-device\n");
				phy->state = OTG_STATE_UNDEFINED;
				pm_runtime_put_sync(phy->dev);
				return;
			}
		}
		break;
	case OTG_STATE_A_HOST:
		if (test_bit(ID, &dotg->inputs)) {
			dev_dbg(phy->dev, "id\n");
			dwc3_otg_start_host(&dotg->otg, 0);
			phy->state = OTG_STATE_B_IDLE;
			work = 1;
		}
		break;
	default:
		dev_err(phy->dev, "%s: invalid otg-state\n", __func__);
	}

	/* Any state transition above requests another pass */
	if (work)
		schedule_work(&dotg->sm_work);
}
/*
 * Instantiate the generic non-control parts of the device.
 *
 * Brings up the on-chip regulators, identifies the silicon (WM8994 vs
 * WM8958) from the ID register, applies platform GPIO/LDO configuration
 * and registers the child MFD devices.  On failure everything acquired
 * so far is unwound via the goto-cleanup chain at the bottom.
 */
static int wm8994_device_init(struct wm8994 *wm8994, int irq)
{
	struct wm8994_pdata *pdata = wm8994->dev->platform_data;
	const char *devname;
	int ret, i;

	mutex_init(&wm8994->io_lock);
	dev_set_drvdata(wm8994->dev, wm8994);

	/* Add the on-chip regulators first for bootstrapping */
	ret = mfd_add_devices(wm8994->dev, -1,
			      wm8994_regulator_devs,
			      ARRAY_SIZE(wm8994_regulator_devs),
			      NULL, 0);
	if (ret != 0) {
		dev_err(wm8994->dev, "Failed to add children: %d\n", ret);
		goto err;
	}

	/* Supply count depends on the declared device variant */
	switch (wm8994->type) {
	case WM8994:
		wm8994->num_supplies = ARRAY_SIZE(wm8994_main_supplies);
		break;
	case WM8958:
		wm8994->num_supplies = ARRAY_SIZE(wm8958_main_supplies);
		break;
	default:
		BUG();
		return -EINVAL;
	}

	wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
				   wm8994->num_supplies, GFP_KERNEL);
	if (!wm8994->supplies) {
		ret = -ENOMEM;
		goto err;
	}

	/* Fill in the per-variant supply names */
	switch (wm8994->type) {
	case WM8994:
		for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++)
			wm8994->supplies[i].supply = wm8994_main_supplies[i];
		break;
	case WM8958:
		for (i = 0; i < ARRAY_SIZE(wm8958_main_supplies); i++)
			wm8994->supplies[i].supply = wm8958_main_supplies[i];
		break;
	default:
		BUG();
		return -EINVAL;
	}

	ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
				 wm8994->supplies);
	if (ret != 0) {
		dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
		goto err_supplies;
	}

	ret = regulator_bulk_enable(wm8994->num_supplies,
				    wm8994->supplies);
	if (ret != 0) {
		dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret);
		goto err_get;
	}

	/* The software-reset register doubles as the chip ID */
	ret = wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET);
	if (ret < 0) {
		dev_err(wm8994->dev, "Failed to read ID register\n");
		goto err_enable;
	}
	switch (ret) {
	case 0x8994:
		devname = "WM8994";
		if (wm8994->type != WM8994)
			dev_warn(wm8994->dev, "Device registered as type %d\n",
				 wm8994->type);
		wm8994->type = WM8994;
		break;
	case 0x8958:
		devname = "WM8958";
		if (wm8994->type != WM8958)
			dev_warn(wm8994->dev, "Device registered as type %d\n",
				 wm8994->type);
		wm8994->type = WM8958;
		break;
	default:
		dev_err(wm8994->dev, "Device is not a WM8994, ID is %x\n",
			ret);
		ret = -EINVAL;
		goto err_enable;
	}

	ret = wm8994_reg_read(wm8994, WM8994_CHIP_REVISION);
	if (ret < 0) {
		dev_err(wm8994->dev, "Failed to read revision register: %d\n",
			ret);
		goto err_enable;
	}

	/* Early WM8994 silicon revisions (A/B) have known limitations */
	switch (ret) {
	case 0:
	case 1:
		if (wm8994->type == WM8994)
			dev_warn(wm8994->dev,
				 "revision %c not fully supported\n",
				 'A' + ret);
		break;
	default:
		break;
	}

	dev_info(wm8994->dev, "%s revision %c\n", devname, 'A' + ret);

	if (pdata) {
		wm8994->irq_base = pdata->irq_base;
		wm8994->gpio_base = pdata->gpio_base;

		/* GPIO configuration is only applied if it's non-zero */
		for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
			if (pdata->gpio_defaults[i]) {
				wm8994_set_bits(wm8994, WM8994_GPIO_1 + i,
						0xffff,
						pdata->gpio_defaults[i]);
			}
		}
	}

	/* In some system designs where the regulators are not in use,
	 * we can achieve a small reduction in leakage currents by
	 * floating LDO outputs. This bit makes no difference if the
	 * LDOs are enabled, it only affects cases where the LDOs were
	 * in operation and are then disabled. */
	for (i = 0; i < WM8994_NUM_LDO_REGS; i++) {
		if (wm8994_ldo_in_use(pdata, i))
			wm8994_set_bits(wm8994, WM8994_LDO_1 + i,
					WM8994_LDO1_DISCH, WM8994_LDO1_DISCH);
		else
			wm8994_set_bits(wm8994, WM8994_LDO_1 + i,
					WM8994_LDO1_DISCH, 0);
	}

	wm8994_irq_init(wm8994);

	ret = mfd_add_devices(wm8994->dev, -1,
			      wm8994_devs, ARRAY_SIZE(wm8994_devs),
			      NULL, 0);
	if (ret != 0) {
		dev_err(wm8994->dev, "Failed to add children: %d\n", ret);
		goto err_irq;
	}

	pm_runtime_enable(wm8994->dev);
	pm_runtime_resume(wm8994->dev);

	return 0;

err_irq:
	wm8994_irq_exit(wm8994);
err_enable:
	regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies);
err_get:
	regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
err_supplies:
	kfree(wm8994->supplies);
err:
	/* NOTE(review): frees the wm8994 struct here — caller must not
	 * touch it after a failed init; verify against the caller. */
	mfd_remove_devices(wm8994->dev);
	kfree(wm8994);
	return ret;
}
/*
 * link_pm_ioctl - userspace control of the modem link power management.
 *
 * Handles link-active GPIO control, host-wake/connection queries and
 * (hub-only) port power on/off sequencing.  Returns 0 or a negative
 * errno; the GET_HOSTWAKE/CONNECTED commands return their value
 * directly instead.
 */
static long link_pm_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	int value, err = 0;
	struct link_pm_data *pm_data = file->private_data;

	mif_info("cmd: 0x%08x\n", cmd);

	switch (cmd) {
	case IOCTL_LINK_CONTROL_ACTIVE:
		/* Drive the link-active GPIO to the userspace value */
		if (copy_from_user(&value, (const void __user *)arg,
				sizeof(int)))
			return -EFAULT;
		gpio_set_value(pm_data->gpio_link_active, value);
		break;
	case IOCTL_LINK_GET_HOSTWAKE:
		/* Host-wake GPIO is active-low, hence the negation */
		return !gpio_get_value(pm_data->gpio_link_hostwake);
	case IOCTL_LINK_CONNECTED:
		return pm_data->usb_ld->if_usb_connected;
	case IOCTL_LINK_PORT_ON: /* hub only */
		/* ignore cp host wakeup irq, set the hub_init_lock when AP try
		 CP off and release hub_init_lock when CP boot done */
		pm_data->hub_init_lock = 0;
		if (pm_data->root_hub) {
			pm_runtime_resume(pm_data->root_hub);
			pm_runtime_forbid(pm_data->root_hub->parent);
		}
		if (pm_data->port_enable) {
			err = pm_data->port_enable(2, 1);
			if (err < 0) {
				mif_err("hub on fail err=%d\n", err);
				goto exit;
			}
			pm_data->hub_status = HUB_STATE_RESUMMING;
		}
		break;
	case IOCTL_LINK_PORT_OFF: /* hub only */
		if (pm_data->usb_ld->if_usb_connected) {
			struct usb_device *udev =
					pm_data->usb_ld->usbdev->parent;
			/* Hold the device active while forcing disconnect */
			pm_runtime_get_sync(&udev->dev);
			if (udev->state != USB_STATE_NOTATTACHED) {
				usb_force_disconnect(udev);
				pr_info("force disconnect maybe cp-reset!!\n");
			}
			pm_runtime_put_autosuspend(&udev->dev);
		}
		err = link_pm_hub_standby(pm_data);
		if (err < 0) {
			mif_err("usb3503 active fail\n");
			goto exit;
		}
		pm_data->hub_init_lock = 1;
		pm_data->hub_handshake_done = 0;
		break;
	default:
		/* Unknown commands are silently accepted (err stays 0) */
		break;
	}
exit:
	return err;
}
/*
 * s5p_ehci_suspend - system-suspend handler for the S5P EHCI controller.
 *
 * Optionally coordinates with the HSIC modem PM layer and the link
 * device before suspending; refuses suspend (-EBUSY) if the modem
 * cannot be powered down or a host-wakeup IRQ is pending.  On success
 * the controller IRQs are masked, the PHY is shut down and the clock
 * is gated.
 */
static int s5p_ehci_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
	struct s5p_ehci_hcd *s5p_ehci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ehci->hcd;
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	unsigned long flags;
	int rc = 0;

#ifdef CONFIG_MDM_HSIC_PM
	/*
	 * check suspend returns 1 if it is possible to suspend
	 * otherwise, it returns 0 impossible or returns some error
	 */
	rc = check_udev_suspend_allowed(hsic_pm_dev);
	if (rc > 0) {
		set_host_stat(hsic_pm_dev, POWER_OFF);
		if (wait_dev_pwr_stat(hsic_pm_dev, POWER_OFF) < 0) {
			/* modem did not ack power-off: undo and abort */
			set_host_stat(hsic_pm_dev, POWER_ON);
			pm_runtime_resume(&pdev->dev);
			return -EBUSY;
		}
	} else if (rc == -ENODEV) {
		/* no hsic pm driver loaded, proceed suspend */
		pr_debug("%s: suspend without hsic pm\n", __func__);
	} else {
		pm_runtime_resume(&pdev->dev);
		return -EBUSY;
	}
	rc = 0;
#endif
#if defined(CONFIG_LINK_DEVICE_HSIC) || defined(CONFIG_LINK_DEVICE_USB)
	/* A pending host-wakeup means the modem wants attention: abort */
	rc = get_hostwake_state();
	if (rc) {
		pr_info("%s: suspend fail by host wakeup irq\n", __func__);
		pm_runtime_resume(&pdev->dev);
		return -EBUSY;
	}
#endif

	if (time_before(jiffies, ehci->next_statechange))
		usleep_range(10000, 11000);

	/* Root hub was already suspended. Disable irq emission and
	 * mark HW unaccessible, bail out if RH has been resumed. Use
	 * the spinlock to properly synchronize with possible pending
	 * RH suspend or resume activity.
	 *
	 * This is still racy as hcd->state is manipulated outside of
	 * any locks =P But that will be a different fix.
	 */
	spin_lock_irqsave(&ehci->lock, flags);
	if (hcd->state != HC_STATE_SUSPENDED && hcd->state != HC_STATE_HALT) {
		spin_unlock_irqrestore(&ehci->lock, flags);
		return -EINVAL;
	}
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
	/* Read back to flush the posted write before proceeding */
	(void)ehci_readl(ehci, &ehci->regs->intr_enable);

	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	spin_unlock_irqrestore(&ehci->lock, flags);

	if (pdata && pdata->phy_exit)
		pdata->phy_exit(pdev, S5P_USB_PHY_HOST);

#if defined(CONFIG_LINK_DEVICE_HSIC) || defined(CONFIG_LINK_DEVICE_USB)
	set_host_states(0);
#endif

	clk_disable(s5p_ehci->clk);

	return rc;
}
/*
 * s5p_ehci_resume - system-resume handler for the S5P EHCI controller.
 *
 * Re-enables the clock and PHY, then either re-arms interrupts (if the
 * controller kept its configuration across suspend) or performs a full
 * halt/reset/restart because power was lost.
 */
static int s5p_ehci_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_ehci_hcd *s5p_ehci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ehci->hcd;
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);

	clk_enable(s5p_ehci->clk);
	pm_runtime_resume(&pdev->dev);

	s5p_ehci_phy_init(pdev);

	/* if EHCI was off, hcd was removed */
	if (!s5p_ehci->power_on) {
		dev_info(dev, "Nothing to do for the device (power off)\n");
		return 0;
	}

	if (time_before(jiffies, ehci->next_statechange))
		msleep(10);

	/* Mark hardware accessible again as we are out of D3 state by now */
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);

	if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
		/* Configuration survived: just restore the IRQ mask */
		int mask = INTR_MASK;

		if (!hcd->self.root_hub->do_remote_wakeup)
			mask &= ~STS_PCD;
		ehci_writel(ehci, mask, &ehci->regs->intr_enable);
		ehci_readl(ehci, &ehci->regs->intr_enable);
		return 0;
	}

	ehci_dbg(ehci, "lost power, restarting\n");
	usb_root_hub_lost_power(hcd->self.root_hub);

	(void) ehci_halt(ehci);
	(void) ehci_reset(ehci);

	/* emptying the schedule aborts any urbs */
	spin_lock_irq(&ehci->lock);
	if (ehci->reclaim)
		end_unlink_async(ehci);
	ehci_work(ehci);
	spin_unlock_irq(&ehci->lock);

	ehci_writel(ehci, ehci->command, &ehci->regs->command);
	ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
	ehci_readl(ehci, &ehci->regs->command);	/* unblock posted writes */

	/* here we "know" root ports should always stay powered */
	ehci_port_power(ehci, 1);

	hcd->state = HC_STATE_SUSPENDED;

#if defined(CONFIG_LINK_DEVICE_HSIC) || \
	(defined(CONFIG_LINK_DEVICE_USB) && \
	!defined(CONFIG_USBHUB_USB3503))
	/* let the modem link finish its resume handshake */
	s5p_wait_for_cp_resume(pdev, hcd);
#endif
	return 0;
}
/**
 * acpi_subsys_suspend - Run the device driver's suspend callback.
 * @dev: Device to handle.
 *
 * Follow PCI and resume devices suspended at run time before running their
 * system suspend callbacks.
 */
int acpi_subsys_suspend(struct device *dev)
{
	int ret;

	/* Bring the device back to full power first, mirroring PCI */
	pm_runtime_resume(dev);

	ret = pm_generic_suspend(dev);
	return ret;
}
/*
 * msm_hsusb_request_host - service host-role requests from the OTG core.
 *
 * @handle:  opaque msmusb_hcd pointer registered with the OTG driver
 * @request: one of the REQUEST_* operations (HNP suspend/resume, root-hub
 *           resume, host start/stop)
 *
 * START/STOP bracket the HCD lifetime: clocks, VBUS, GPIO config, a
 * wakelock and a runtime-PM reference are taken on START and released
 * on STOP.
 */
static void msm_hsusb_request_host(void *handle, int request)
{
	struct msmusb_hcd *mhcd = handle;
	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
	struct msm_usb_host_platform_data *pdata = mhcd->pdata;
	struct msm_otg *otg = container_of(mhcd->xceiv, struct msm_otg, otg);
#ifdef CONFIG_USB_OTG
	struct usb_device *udev = hcd->self.root_hub;
#endif
	struct device *dev = hcd->self.controller;

	switch (request) {
#ifdef CONFIG_USB_OTG
	case REQUEST_HNP_SUSPEND:
		/* disable Root hub auto suspend. As hardware is configured
		 * for peripheral mode, mark hardware is not available.
		 */
		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) {
			pm_runtime_disable(&udev->dev);
			/* Mark root hub as disconnected. This would
			 * protect suspend/resume via sysfs.
			 */
			udev->state = USB_STATE_NOTATTACHED;
			clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
			hcd->state = HC_STATE_HALT;
			pm_runtime_put_noidle(dev);
			pm_runtime_suspend(dev);
		}
		break;
	case REQUEST_HNP_RESUME:
		/* Undo HNP_SUSPEND: reset+restart the controller with the
		 * IRQ masked, then make the root hub visible again. */
		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED) {
			pm_runtime_get_noresume(dev);
			pm_runtime_resume(dev);
			disable_irq(hcd->irq);
			ehci_msm_reset(hcd);
			ehci_msm_run(hcd);
			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
			pm_runtime_enable(&udev->dev);
			udev->state = USB_STATE_CONFIGURED;
			enable_irq(hcd->irq);
		}
		break;
#endif
	case REQUEST_RESUME:
		usb_hcd_resume_root_hub(hcd);
		break;
	case REQUEST_START:
		if (mhcd->running)
			break;
		/* Pin the controller active for the whole host session */
		pm_runtime_get_noresume(dev);
		pm_runtime_resume(dev);
		wake_lock(&mhcd->wlock);
		msm_xusb_pm_qos_update(mhcd, 1);
		msm_xusb_enable_clks(mhcd);
		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED)
			if (otg->set_clk)
				otg->set_clk(mhcd->xceiv, 1);
		if (pdata->vbus_power)
			pdata->vbus_power(pdata->phy_info, 1);
		if (pdata->config_gpio)
			pdata->config_gpio(1);
		usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
		mhcd->running = 1;
		if (PHY_TYPE(pdata->phy_info) == USB_PHY_INTEGRATED)
			if (otg->set_clk)
				otg->set_clk(mhcd->xceiv, 0);
		break;
	case REQUEST_STOP:
		if (!mhcd->running)
			break;
		mhcd->running = 0;
		/* come out of lpm before deregistration */
		if (PHY_TYPE(pdata->phy_info) == USB_PHY_SERIAL_PMIC) {
			usb_lpm_exit(hcd);
			if (cancel_work_sync(&(mhcd->lpm_exit_work)))
				usb_lpm_exit_w(&mhcd->lpm_exit_work);
		}
		usb_remove_hcd(hcd);
		if (pdata->config_gpio)
			pdata->config_gpio(0);
		if (pdata->vbus_power)
			pdata->vbus_power(pdata->phy_info, 0);
		msm_xusb_disable_clks(mhcd);
		/* keep the system awake briefly for teardown to settle */
		wake_lock_timeout(&mhcd->wlock, HZ/2);
		msm_xusb_pm_qos_update(mhcd, 0);
		/* Drop the reference taken in REQUEST_START */
		pm_runtime_put_noidle(dev);
		pm_runtime_suspend(dev);
		break;
	}
}
/*
 * link_pm_runtime_work - delayed work that drives the USB link to
 * RPM_ACTIVE for modem communication.
 *
 * Wakes the slave (modem) side, resumes the USB device, and re-queues
 * itself every 20ms until the device reports RPM_ACTIVE.  After 10
 * failed attempts the modem link is declared dead and reset.
 */
static void link_pm_runtime_work(struct work_struct *work)
{
	int ret;
	struct link_pm_data *pm_data =
		container_of(work, struct link_pm_data, link_pm_work.work);
	struct device *dev = &pm_data->usb_ld->usbdev->dev;

	/* Bail out if the link is gone or a system suspend is in flight */
	if (!pm_data->usb_ld->if_usb_connected || pm_data->dpm_suspending)
		return;

	if (pm_data->usb_ld->ld.com_state == COM_NONE)
		return;

	mif_debug("for dev 0x%p : current %d\n", dev,
					dev->power.runtime_status);

	switch (dev->power.runtime_status) {
	case RPM_ACTIVE:
		/* Already awake: report completion and stop retrying */
		pm_data->resume_retry_cnt = 0;
		pm_data->resume_requested = false;
		complete(&pm_data->active_done);
		return;
	case RPM_SUSPENDED:
		if (pm_data->resume_requested)
			break;
		pm_data->resume_requested = true;
		/* Hold a wakelock across the slave-wake + resume attempt */
		wake_lock(&pm_data->rpm_wake);
		ret = link_pm_slave_wake(pm_data);
		if (ret < 0) {
			mif_err("slave wake fail\n");
			wake_unlock(&pm_data->rpm_wake);
			break;
		}
		/* The link may drop while we were waking the slave */
		if (!pm_data->usb_ld->if_usb_connected) {
			wake_unlock(&pm_data->rpm_wake);
			return;
		}
		ret = pm_runtime_resume(dev);
		if (ret < 0) {
			mif_err("resume error(%d)\n", ret);
			if (!pm_data->usb_ld->if_usb_connected) {
				wake_unlock(&pm_data->rpm_wake);
				return;
			}
			/* force to go runtime idle before retry resume */
			if (dev->power.timer_expires == 0 &&
						!dev->power.request_pending) {
				mif_debug("run time idle\n");
				pm_runtime_idle(dev);
			}
		}
		wake_unlock(&pm_data->rpm_wake);
		break;
	default:
		/* RPM_RESUMING / RPM_SUSPENDING: just poll again below */
		break;
	}
	pm_data->resume_requested = false;

	/* Check until runtime_status goes to active: attempt 10 times,
	 * otherwise re-establish the modem link.  If pm_runtime_resume
	 * ran properly, the rpm status must be ACTIVE by now. */
	if (dev->power.runtime_status == RPM_ACTIVE) {
		pm_data->resume_retry_cnt = 0;
		complete(&pm_data->active_done);
	} else if (pm_data->resume_retry_cnt++ > 10) {
		mif_err("runtime_status(%d), retry_cnt(%d)\n",
			dev->power.runtime_status, pm_data->resume_retry_cnt);
		link_pm_change_modem_state(pm_data, STATE_CRASH_RESET);
	} else
		queue_delayed_work(pm_data->wq, &pm_data->link_pm_work,
							msecs_to_jiffies(20));
}
/*
 * sh_mmcif_probe - platform probe for the SH MMCIF controller (legacy,
 * pre-devm variant).
 *
 * Maps registers, allocates the mmc_host, acquires the interface clock,
 * requests both IRQs and registers the host.  All acquisition is undone
 * in reverse order through the labelled error chain at the bottom.
 */
static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
	struct resource *res;
	void __iomem *reg;
	char clk_name[8];

	/* This variant requires both the error and normal IRQs */
	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0 || irq[1] < 0) {
		dev_err(&pdev->dev, "Get irq error\n");
		return -ENXIO;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "platform_get_resource error.\n");
		return -ENXIO;
	}
	reg = ioremap(res->start, resource_size(res));
	if (!reg) {
		dev_err(&pdev->dev, "ioremap error.\n");
		return -ENOMEM;
	}
	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto ealloch;
	}
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->addr = reg;
	host->timeout = 1000;

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps = MMC_CAP_MMC_HIGHSPEED;
	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	pm_runtime_enable(&pdev->dev);
	host->power = false;

	/* Clock is named after the platform device instance, e.g. "mmc0" */
	snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
	host->hclk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(host->hclk)) {
		ret = PTR_ERR(host->hclk);
		dev_err(&pdev->dev, "cannot get clock \"%s\": %d\n",
			clk_name, ret);
		goto eclkget;
	}
	ret = sh_mmcif_clk_update(host);
	if (ret < 0)
		goto eclkupdate;

	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto eresume;

	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);

	/* Quiesce the controller before enabling IRQ delivery */
	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0,
				   "sh_mmc:error", host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
		goto ereqirq0;
	}
	ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0,
				   "sh_mmc:int", host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
		goto ereqirq1;
	}

	if (pd && pd->use_cd_gpio) {
		ret = mmc_gpio_request_cd(mmc, pd->cd_gpio);
		if (ret < 0)
			goto erqcd;
	}

	/* Clock is re-enabled on demand once the host is running */
	clk_disable(host->hclk);
	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto emmcaddh;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
	dev_dbg(&pdev->dev, "chip ver H'%04x\n",
		sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
	return ret;

emmcaddh:
	if (pd && pd->use_cd_gpio)
		mmc_gpio_free_cd(mmc);
erqcd:
	free_irq(irq[1], host);
ereqirq1:
	free_irq(irq[0], host);
ereqirq0:
	pm_runtime_suspend(&pdev->dev);
eresume:
	clk_disable(host->hclk);
eclkupdate:
	clk_put(host->hclk);
eclkget:
	pm_runtime_disable(&pdev->dev);
	mmc_free_host(mmc);
ealloch:
	iounmap(reg);
	return ret;
}
/*
 * sh_mmcif_probe - platform probe for the SH MMCIF controller (devm
 * variant).
 *
 * Uses devm_* helpers for the register mapping, clock and IRQs, so the
 * explicit error chain only needs to unwind the host allocation, runtime
 * PM and clock enable.  The second IRQ is optional; when absent a single
 * combined handler is registered under the device name.
 */
static int sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
	struct resource *res;
	void __iomem *reg;
	const char *name;

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq(pdev, 1);
	if (irq[0] < 0) {
		dev_err(&pdev->dev, "Get irq error\n");
		return -ENXIO;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto err_host;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->addr = reg;
	host->timeout = msecs_to_jiffies(10000);
	host->ccs_enable = !pd || !pd->ccs_unsupported;
	host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	pm_runtime_enable(&pdev->dev);
	host->power = false;

	host->hclk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->hclk)) {
		ret = PTR_ERR(host->hclk);
		dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
		goto err_pm;
	}
	ret = sh_mmcif_clk_update(host);
	if (ret < 0)
		goto err_pm;

	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto err_clk;

	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);

	/* Quiesce the controller before enabling IRQ delivery */
	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/* Single-IRQ setups get the combined handler on irq[0] */
	name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error";
	ret = devm_request_threaded_irq(&pdev->dev, irq[0], sh_mmcif_intr,
					sh_mmcif_irqt, 0, name, host);
	if (ret) {
		dev_err(&pdev->dev, "request_irq error (%s)\n", name);
		goto err_clk;
	}
	if (irq[1] >= 0) {
		ret = devm_request_threaded_irq(&pdev->dev, irq[1],
						sh_mmcif_intr, sh_mmcif_irqt,
						0, "sh_mmc:int", host);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq error (sh_mmc:int)\n");
			goto err_clk;
		}
	}

	if (pd && pd->use_cd_gpio) {
		ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0);
		if (ret < 0)
			goto err_clk;
	}

	mutex_init(&host->thread_lock);

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto err_clk;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);

	dev_info(&pdev->dev, "Chip version 0x%04x, clock rate %luMHz\n",
		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
		 clk_get_rate(host->hclk) / 1000000UL);

	/* Clock is re-enabled on demand once the host is running */
	clk_disable_unprepare(host->hclk);
	return ret;

err_clk:
	clk_disable_unprepare(host->hclk);
err_pm:
	pm_runtime_disable(&pdev->dev);
err_host:
	mmc_free_host(mmc);
	return ret;
}
/**
 * dwc3_otg_sm_work - workqueue function.
 *
 * @w: Pointer to the dwc3 otg workqueue
 *
 * NOTE: After any change in phy->state,
 * we must reschedule the state machine.
 *
 * Delayed-work variant: transitions may be re-queued with a delay
 * (e.g. to retry acquiring the VBUS regulator), and floating-charger
 * detection is retried up to max_chgr_retry_count times.
 */
static void dwc3_otg_sm_work(struct work_struct *w)
{
	struct dwc3_otg *dotg = container_of(w, struct dwc3_otg, sm_work.work);
	struct usb_phy *phy = dotg->otg.phy;
	struct dwc3_charger *charger = dotg->charger;
	bool work = 0;
	int ret = 0;
	unsigned long delay = 0;

	/* Make sure the controller is powered before touching state */
	pm_runtime_resume(phy->dev);
	dev_info(phy->dev, "%s state\n", otg_state_string(phy->state));

	/* Check OTG state */
	switch (phy->state) {
	case OTG_STATE_UNDEFINED:
		dwc3_otg_init_sm(dotg);
		if (!dotg->psy) {
			dotg->psy = power_supply_get_by_name("usb");
			if (!dotg->psy)
				dev_err(phy->dev,
					"couldn't get usb power supply\n");
		}

		/* Switch to A or B-Device according to ID / BSV */
		if (!test_bit(ID, &dotg->inputs)) {
			dev_dbg(phy->dev, "!id\n");
			phy->state = OTG_STATE_A_IDLE;
			work = 1;
		} else if (test_bit(B_SESS_VLD, &dotg->inputs)) {
			dev_dbg(phy->dev, "b_sess_vld\n");
			phy->state = OTG_STATE_B_IDLE;
			work = 1;
		} else {
			phy->state = OTG_STATE_B_IDLE;
			dev_dbg(phy->dev, "No device, trying to suspend\n");
			pm_runtime_put_sync(phy->dev);
		}
		break;

	case OTG_STATE_B_IDLE:
		if (!test_bit(ID, &dotg->inputs)) {
			dev_dbg(phy->dev, "!id\n");
			phy->state = OTG_STATE_A_IDLE;
			work = 1;
			dotg->charger_retry_count = 0;
			if (charger) {
				/*
				 * Stop an in-flight detection, or clear a
				 * previously latched charger type.
				 */
				if (charger->chg_type == DWC3_INVALID_CHARGER)
					charger->start_detection(dotg->charger,
								false);
				else
					charger->chg_type =
							DWC3_INVALID_CHARGER;
			}
		} else if (test_bit(B_SESS_VLD, &dotg->inputs)) {
			dev_dbg(phy->dev, "b_sess_vld\n");
			if (charger) {
				/* Has charger been detected? If no detect it */
				switch (charger->chg_type) {
				case DWC3_DCP_CHARGER:
				case DWC3_PROPRIETARY_CHARGER:
					/* No enumeration: max current, sleep */
					dev_dbg(phy->dev, "lpm, DCP charger\n");
					dwc3_otg_set_power(phy,
							DWC3_IDEV_CHG_MAX);
					pm_runtime_put_sync(phy->dev);
					break;
				case DWC3_CDP_CHARGER:
					dwc3_otg_set_power(phy,
							DWC3_IDEV_CHG_MAX);
					dwc3_otg_start_peripheral(&dotg->otg,
								1);
					phy->state = OTG_STATE_B_PERIPHERAL;
					work = 1;
					break;
				case DWC3_SDP_CHARGER:
					dwc3_otg_start_peripheral(&dotg->otg,
								1);
					phy->state = OTG_STATE_B_PERIPHERAL;
					work = 1;
					break;
				case DWC3_FLOATED_CHARGER:
					if (dotg->charger_retry_count <
							max_chgr_retry_count)
						dotg->charger_retry_count++;
					/*
					 * In case of floating charger, if
					 * retry count equal to max retry count
					 * notify PMIC about floating charger
					 * and put Hw in low power mode. Else
					 * perform charger detection again by
					 * calling start_detection() with false
					 * and then with true argument.
					 */
					if (dotg->charger_retry_count ==
						max_chgr_retry_count) {
						dwc3_otg_set_power(phy, 0);
						qpnp_chg_notify_invalid_usb();
						pm_runtime_put_sync(phy->dev);
						break;
					}
					charger->start_detection(dotg->charger,
								false);
					/* intentional fall through: restart
					 * detection via the default case */
				default:
					dev_dbg(phy->dev, "chg_det started\n");
					charger->start_detection(charger, true);
					break;
				}
			} else {
				/* no charger registered, start peripheral */
				if (dwc3_otg_start_peripheral(&dotg->otg, 1)) {
					/*
					 * Probably set_peripheral not called
					 * yet. We will re-try as soon as it
					 * will be called
					 */
					dev_err(phy->dev, "enter lpm as\n"
						"unable to start B-device\n");
					phy->state = OTG_STATE_UNDEFINED;
					pm_runtime_put_sync(phy->dev);
					return;
				}
			}
		} else {
			/* Session gone: stop detection, zero current, sleep */
			if (charger)
				charger->start_detection(dotg->charger, false);
			dotg->charger_retry_count = 0;
			dwc3_otg_set_power(phy, 0);
			dev_dbg(phy->dev, "No device, trying to suspend\n");
			pm_runtime_put_sync(phy->dev);
		}
		break;

	case OTG_STATE_B_PERIPHERAL:
		if (!test_bit(B_SESS_VLD, &dotg->inputs) ||
				!test_bit(ID, &dotg->inputs)) {
			dev_dbg(phy->dev, "!id || !bsv\n");
			dwc3_otg_start_peripheral(&dotg->otg, 0);
			phy->state = OTG_STATE_B_IDLE;
			if (charger)
				charger->chg_type = DWC3_INVALID_CHARGER;
			work = 1;
		}
		break;

	case OTG_STATE_A_IDLE:
		/* Switch to A-Device */
		if (test_bit(ID, &dotg->inputs)) {
			dev_dbg(phy->dev, "id\n");
			phy->state = OTG_STATE_B_IDLE;
			dotg->vbus_retry_count = 0;
			work = 1;
			clear_bit(A_VBUS_DROP_DET, &dotg->inputs);
		} else if (test_bit(A_VBUS_DROP_DET, &dotg->inputs)) {
			dev_dbg(phy->dev, "vbus_drop_det\n");
			/* staying on here until exit from A-Device */
		} else {
			phy->state = OTG_STATE_A_HOST;
			ret = dwc3_otg_start_host(&dotg->otg, 1);
			if ((ret == -EPROBE_DEFER) &&
						dotg->vbus_retry_count < 3) {
				/*
				 * Get regulator failed as regulator driver is
				 * not up yet. Will try to start host after 1sec
				 */
				phy->state = OTG_STATE_A_IDLE;
				dev_dbg(phy->dev,
					"Unable to get vbus regulator. Retrying...\n");
				delay = VBUS_REG_CHECK_DELAY;
				work = 1;
				dotg->vbus_retry_count++;
			} else if (ret) {
				/*
				 * Probably set_host was not called yet.
				 * We will re-try as soon as it will be called
				 */
				dev_dbg(phy->dev, "enter lpm as\n"
					"unable to start A-device\n");
				phy->state = OTG_STATE_A_IDLE;
				pm_runtime_put_sync(phy->dev);
				return;
			}
		}
		break;

	case OTG_STATE_A_HOST:
		if (test_bit(ID, &dotg->inputs)) {
			dev_dbg(phy->dev, "id\n");
			dwc3_otg_start_host(&dotg->otg, 0);
			phy->state = OTG_STATE_B_IDLE;
			dotg->vbus_retry_count = 0;
			work = 1;
			clear_bit(A_VBUS_DROP_DET, &dotg->inputs);
		} else if (test_bit(A_VBUS_DROP_DET, &dotg->inputs)) {
			/* VBUS collapsed (e.g. overcurrent): stop host */
			dev_dbg(phy->dev, "vbus_drop_det\n");
			dwc3_otg_start_host(&dotg->otg, 0);
			phy->state = OTG_STATE_A_IDLE;
			work = 1;
#ifdef CONFIG_USB_HOST_EXTRA_NOTIFICATION
			host_send_uevent(USB_HOST_EXT_EVENT_VBUS_DROP);
#endif
		}
		break;

	default:
		dev_err(phy->dev, "%s: invalid otg-state\n", __func__);
	}

	/* Any state transition above requests another (possibly delayed) pass */
	if (work)
		queue_delayed_work(system_nrt_wq, &dotg->sm_work, delay);
}
static int sh_eth_drv_probe(struct platform_device *pdev) { int ret, devno = 0; struct resource *res; struct net_device *ndev = NULL; struct sh_eth_private *mdp = NULL; struct sh_eth_plat_data *pd; /* get base addr */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (unlikely(res == NULL)) { dev_err(&pdev->dev, "invalid resource\n"); ret = -EINVAL; goto out; } ndev = alloc_etherdev(sizeof(struct sh_eth_private)); if (!ndev) { dev_err(&pdev->dev, "Could not allocate device.\n"); ret = -ENOMEM; goto out; } /* The sh Ether-specific entries in the device structure. */ ndev->base_addr = res->start; devno = pdev->id; if (devno < 0) devno = 0; ndev->dma = -1; ret = platform_get_irq(pdev, 0); if (ret < 0) { ret = -ENODEV; goto out_release; } ndev->irq = ret; SET_NETDEV_DEV(ndev, &pdev->dev); /* Fill in the fields of the device structure with ethernet values. */ ether_setup(ndev); mdp = netdev_priv(ndev); mdp->addr = ioremap(res->start, resource_size(res)); if (mdp->addr == NULL) { ret = -ENOMEM; dev_err(&pdev->dev, "ioremap failed.\n"); goto out_release; } spin_lock_init(&mdp->lock); mdp->pdev = pdev; pm_runtime_enable(&pdev->dev); pm_runtime_resume(&pdev->dev); pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data); /* get PHY ID */ mdp->phy_id = pd->phy; mdp->phy_interface = pd->phy_interface; /* EDMAC endian */ mdp->edmac_endian = pd->edmac_endian; mdp->no_ether_link = pd->no_ether_link; mdp->ether_link_active_low = pd->ether_link_active_low; mdp->reg_offset = sh_eth_get_register_offset(pd->register_type); /* set cpu data */ #if defined(SH_ETH_HAS_BOTH_MODULES) mdp->cd = sh_eth_get_cpu_data(mdp); #else mdp->cd = &sh_eth_my_cpu_data; #endif sh_eth_set_default_cpu_data(mdp->cd); /* set function */ ndev->netdev_ops = &sh_eth_netdev_ops; SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); ndev->watchdog_timeo = TX_TIMEOUT; /* debug message level */ mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; mdp->post_rx = POST_RX >> (devno << 1); mdp->post_fw = POST_FW >> (devno << 1); /* 
read and set MAC address */ read_mac_address(ndev, pd->mac_addr); /* First device only init */ if (!devno) { if (mdp->cd->tsu) { struct resource *rtsu; rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!rtsu) { dev_err(&pdev->dev, "Not found TSU resource\n"); goto out_release; } mdp->tsu_addr = ioremap(rtsu->start, resource_size(rtsu)); } if (mdp->cd->chip_reset) mdp->cd->chip_reset(ndev); if (mdp->cd->tsu) { /* TSU init (Init only)*/ sh_eth_tsu_init(mdp); } } /* network device register */ ret = register_netdev(ndev); if (ret) goto out_release; /* mdio bus init */ ret = sh_mdio_init(ndev, pdev->id, pd); if (ret) goto out_unregister; /* print device information */ pr_info("Base address at 0x%x, %pM, IRQ %d.\n", (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); platform_set_drvdata(pdev, ndev); return ret; out_unregister: unregister_netdev(ndev); out_release: /* net_dev free */ if (mdp && mdp->addr) iounmap(mdp->addr); if (mdp && mdp->tsu_addr) iounmap(mdp->tsu_addr); if (ndev) free_netdev(ndev); out: return ret; }
/* * Instantiate the generic non-control parts of the device. */ static int wm8994_device_init(struct wm8994 *wm8994, int irq) { struct wm8994_pdata *pdata = wm8994->dev->platform_data; struct regmap_config *regmap_config; const char *devname; int ret, i; int pulls = 0; dev_set_drvdata(wm8994->dev, wm8994); /* Add the on-chip regulators first for bootstrapping */ ret = mfd_add_devices(wm8994->dev, -1, wm8994_regulator_devs, ARRAY_SIZE(wm8994_regulator_devs), NULL, 0); if (ret != 0) { dev_err(wm8994->dev, "Failed to add children: %d\n", ret); goto err_regmap; } switch (wm8994->type) { case WM1811: wm8994->num_supplies = ARRAY_SIZE(wm1811_main_supplies); break; case WM8994: wm8994->num_supplies = ARRAY_SIZE(wm8994_main_supplies); break; case WM8958: wm8994->num_supplies = ARRAY_SIZE(wm8958_main_supplies); break; default: BUG(); goto err_regmap; } wm8994->supplies = devm_kzalloc(wm8994->dev, sizeof(struct regulator_bulk_data) * wm8994->num_supplies, GFP_KERNEL); if (!wm8994->supplies) { ret = -ENOMEM; goto err_regmap; } switch (wm8994->type) { case WM1811: for (i = 0; i < ARRAY_SIZE(wm1811_main_supplies); i++) wm8994->supplies[i].supply = wm1811_main_supplies[i]; break; case WM8994: for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++) wm8994->supplies[i].supply = wm8994_main_supplies[i]; break; case WM8958: for (i = 0; i < ARRAY_SIZE(wm8958_main_supplies); i++) wm8994->supplies[i].supply = wm8958_main_supplies[i]; break; default: BUG(); goto err_regmap; } ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies, wm8994->supplies); if (ret != 0) { dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret); goto err_regmap; } ret = regulator_bulk_enable(wm8994->num_supplies, wm8994->supplies); if (ret != 0) { dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret); goto err_get; } ret = wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET); if (ret < 0) { dev_err(wm8994->dev, "Failed to read ID register\n"); goto err_enable; } switch (ret) { case 0x1811: devname = 
"WM1811"; if (wm8994->type != WM1811) dev_warn(wm8994->dev, "Device registered as type %d\n", wm8994->type); wm8994->type = WM1811; break; case 0x8994: devname = "WM8994"; if (wm8994->type != WM8994) dev_warn(wm8994->dev, "Device registered as type %d\n", wm8994->type); wm8994->type = WM8994; break; case 0x8958: devname = "WM8958"; if (wm8994->type != WM8958) dev_warn(wm8994->dev, "Device registered as type %d\n", wm8994->type); wm8994->type = WM8958; break; default: dev_err(wm8994->dev, "Device is not a WM8994, ID is %x\n", ret); ret = -EINVAL; goto err_enable; } ret = wm8994_reg_read(wm8994, WM8994_CHIP_REVISION); if (ret < 0) { dev_err(wm8994->dev, "Failed to read revision register: %d\n", ret); goto err_enable; } wm8994->revision = ret; switch (wm8994->type) { case WM8994: switch (wm8994->revision) { case 0: case 1: dev_warn(wm8994->dev, "revision %c not fully supported\n", 'A' + wm8994->revision); break; default: break; } break; case WM1811: /* Revision C did not change the relevant layer */ if (wm8994->revision > 1) wm8994->revision++; break; default: break; } dev_info(wm8994->dev, "%s revision %c\n", devname, 'A' + wm8994->revision); switch (wm8994->type) { case WM1811: regmap_config = &wm1811_regmap_config; break; case WM8994: regmap_config = &wm8994_regmap_config; break; case WM8958: regmap_config = &wm8958_regmap_config; break; default: dev_err(wm8994->dev, "Unknown device type %d\n", wm8994->type); return -EINVAL; } ret = regmap_reinit_cache(wm8994->regmap, regmap_config); if (ret != 0) { dev_err(wm8994->dev, "Failed to reinit register cache: %d\n", ret); return ret; } if (pdata) { wm8994->irq_base = pdata->irq_base; wm8994->gpio_base = pdata->gpio_base; /* GPIO configuration is only applied if it's non-zero */ for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) { if (pdata->gpio_defaults[i]) { wm8994_set_bits(wm8994, WM8994_GPIO_1 + i, 0xffff, pdata->gpio_defaults[i]); } } wm8994->ldo_ena_always_driven = pdata->ldo_ena_always_driven; if 
(pdata->spkmode_pu) pulls |= WM8994_SPKMODE_PU; } /* Disable unneeded pulls */ wm8994_set_bits(wm8994, WM8994_PULL_CONTROL_2, WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD | WM8994_SPKMODE_PU | WM8994_CSNADDR_PD, pulls); /* In some system designs where the regulators are not in use, * we can achieve a small reduction in leakage currents by * floating LDO outputs. This bit makes no difference if the * LDOs are enabled, it only affects cases where the LDOs were * in operation and are then disabled. */ for (i = 0; i < WM8994_NUM_LDO_REGS; i++) { if (wm8994_ldo_in_use(pdata, i)) wm8994_set_bits(wm8994, WM8994_LDO_1 + i, WM8994_LDO1_DISCH, WM8994_LDO1_DISCH); else wm8994_set_bits(wm8994, WM8994_LDO_1 + i, WM8994_LDO1_DISCH, 0); } wm8994_irq_init(wm8994); ret = mfd_add_devices(wm8994->dev, -1, wm8994_devs, ARRAY_SIZE(wm8994_devs), NULL, 0); if (ret != 0) { dev_err(wm8994->dev, "Failed to add children: %d\n", ret); goto err_irq; } pm_runtime_enable(wm8994->dev); pm_runtime_resume(wm8994->dev); return 0; err_irq: wm8994_irq_exit(wm8994); err_enable: regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies); err_get: regulator_bulk_free(wm8994->num_supplies, wm8994->supplies); err_regmap: regmap_exit(wm8994->regmap); mfd_remove_devices(wm8994->dev); return ret; }
static long link_pm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int value, err = 0; struct task_struct *task = get_current(); struct link_pm_data *pm_data = file->private_data; struct usb_link_device *usb_ld = pm_data->usb_ld; char taskname[TASK_COMM_LEN]; pr_info("mif: %s: 0x%08x\n", __func__, cmd); switch (cmd) { case IOCTL_LINK_CONTROL_ACTIVE: if (copy_from_user(&value, (const void __user *)arg, sizeof(int))) return -EFAULT; gpio_set_value(pm_data->gpio_link_active, value); break; case IOCTL_LINK_GET_HOSTWAKE: return !gpio_get_value(pm_data->gpio_link_hostwake); case IOCTL_LINK_CONNECTED: return pm_data->usb_ld->if_usb_connected; case IOCTL_LINK_PORT_ON: /* hub only */ /* ignore cp host wakeup irq, set the hub_init_lock when AP try CP off and release hub_init_lock when CP boot done */ pm_data->hub_init_lock = 0; if (pm_data->root_hub) { pm_runtime_resume(pm_data->root_hub); pm_runtime_forbid(pm_data->root_hub->parent); } if (pm_data->port_enable) { err = pm_data->port_enable(2, 1); if (err < 0) { pr_err("mif: %s: hub on fail err=%d\n", __func__, err); goto exit; } pm_data->hub_status = HUB_STATE_RESUMMING; } break; case IOCTL_LINK_PORT_OFF: /* hub only */ if (pm_data->usb_ld->if_usb_connected) { struct usb_device *udev = pm_data->usb_ld->usbdev->parent; pm_runtime_get_sync(&udev->dev); if (udev->state != USB_STATE_NOTATTACHED) { usb_force_disconnect(udev); pr_info("force disconnect maybe cp-reset!!\n"); } pm_runtime_put_autosuspend(&udev->dev); } err = link_pm_hub_standby(pm_data); if (err < 0) { pr_err("mif: %s: usb3503 active fail\n", __func__); goto exit; } pm_data->hub_init_lock = 1; pm_data->hub_handshake_done = 0; break; case IOCTL_LINK_BLOCK_AUTOSUSPEND: /* block autosuspend forever */ mif_info("blocked autosuspend by `%s(%d)'\n", get_task_comm(taskname, task), task->pid); pm_data->block_autosuspend = true; if (usb_ld->usbdev) pm_runtime_forbid(&usb_ld->usbdev->dev); else { mif_err("Block autosuspend failed\n"); err = -ENODEV; } break; 
case IOCTL_LINK_ENABLE_AUTOSUSPEND: /* Enable autosuspend */ mif_info("autosuspend enabled by `%s(%d)'\n", get_task_comm(taskname, task), task->pid); pm_data->block_autosuspend = false; if (usb_ld->usbdev) pm_runtime_allow(&usb_ld->usbdev->dev); else { mif_err("Enable autosuspend failed\n"); err = -ENODEV; } break; default: break; } exit: return err; }