/*
 * hid_sensor_setup_trigger - allocate/register an IIO trigger for a HID
 * sensor and bring the backing platform device under runtime PM with
 * autosuspend.
 *
 * Returns 0 on success or a negative errno; on failure the partially
 * created trigger is unregistered and/or freed.
 */
int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
				struct hid_sensor_common *attrb)
{
	int ret;
	struct iio_trigger *trig;

	trig = iio_trigger_alloc("%s-dev%d", name, indio_dev->id);
	if (trig == NULL) {
		dev_err(&indio_dev->dev, "Trigger Allocate Failed\n");
		ret = -ENOMEM;
		goto error_ret;
	}

	trig->dev.parent = indio_dev->dev.parent;
	iio_trigger_set_drvdata(trig, attrb);
	trig->ops = &hid_sensor_trigger_ops;
	ret = iio_trigger_register(trig);
	if (ret) {
		dev_err(&indio_dev->dev, "Trigger Register Failed\n");
		goto error_free_trig;
	}

	attrb->trigger = trig;
	/* Take an extra reference that is owned by indio_dev->trig */
	indio_dev->trig = iio_trigger_get(trig);

	/* Tell the runtime PM core this device starts out powered up */
	ret = pm_runtime_set_active(&indio_dev->dev);
	if (ret)
		goto error_unreg_trigger;

	iio_device_set_drvdata(indio_dev, attrb);
	pm_suspend_ignore_children(&attrb->pdev->dev, true);
	pm_runtime_enable(&attrb->pdev->dev);
	/* Default to 3 seconds, but can be changed from sysfs */
	pm_runtime_set_autosuspend_delay(&attrb->pdev->dev, 3000);
	pm_runtime_use_autosuspend(&attrb->pdev->dev);

	return ret;
error_unreg_trigger:
	/*
	 * NOTE(review): indio_dev->trig still holds the reference taken by
	 * iio_trigger_get() above when we reach here — confirm the caller's
	 * teardown drops it, otherwise it looks leaked.
	 */
	iio_trigger_unregister(trig);
error_free_trig:
	iio_trigger_free(trig);
error_ret:
	return ret;
}
/**
 * mmc_resume_host - resume a previously suspended host
 * @host: mmc host
 *
 * Re-powers the card if needed, invokes the bus ops resume handler and
 * clears MMC_PM_KEEP_POWER. Returns 0; bus resume errors are logged and
 * then suppressed (card may simply have been removed while suspended).
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	/* Defer the actual resume if the bus uses manual resume */
	if (mmc_bus_manual_resume(host)) {
		host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
		mmc_bus_put(host);
		return 0;
	}

	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell runtime PM core we just powered up the card,
			 * since it still believes the card is powered off.
			 * Note that currently runtime PM is only enabled
			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		/* A live bus without a resume handler is a driver bug */
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			printk(KERN_WARNING "%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			err = 0;
		}
	}
	/* clear flag */
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
/*
 * scsi_dev_type_resume - run a PM callback for a SCSI device and restore
 * its runtime PM state.
 *
 * Invokes @cb with the driver's dev_pm_ops (NULL if unbound), unblocks
 * the SCSI device, and on callback success forces the runtime PM status
 * back to "active". Returns the callback's result.
 */
static int scsi_dev_type_resume(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	const struct dev_pm_ops *pm = NULL;
	int ret;

	if (dev->driver)
		pm = dev->driver->pm;

	ret = cb(dev, pm);
	scsi_device_resume(to_scsi_device(dev));
	dev_dbg(dev, "scsi resume: %d\n", ret);

	if (!ret) {
		/* Force runtime status to active after a successful resume */
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return ret;
}
/*
 * mlx90614_pm_resume - system resume handler for the MLX90614.
 *
 * When a wakeup GPIO is present, wake the chip and mark the device
 * runtime-active; otherwise nothing needs doing. Returns 0 or the
 * negative errno from mlx90614_wakeup().
 */
static int mlx90614_pm_resume(struct device *dev)
{
	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
	struct mlx90614_data *data = iio_priv(indio_dev);
	int ret;

	if (!data->wakeup_gpio)
		return 0;

	ret = mlx90614_wakeup(data);
	if (ret < 0)
		return ret;

	/* Chip is awake now — sync the runtime PM status to "active" */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}
/*
 * dwc3_resume - system resume handler for the DWC3 core.
 *
 * Re-initializes both legacy and generic PHYs, restores GCTL, resumes the
 * gadget when in peripheral/OTG mode, and forces the runtime PM status
 * back to active. Returns 0 or a negative errno from PHY init.
 */
static int dwc3_resume(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	unsigned long flags;
	int ret;

	usb_phy_init(dwc->usb3_phy);
	usb_phy_init(dwc->usb2_phy);
	ret = phy_init(dwc->usb2_generic_phy);
	if (ret < 0)
		return ret;

	ret = phy_init(dwc->usb3_generic_phy);
	if (ret < 0)
		goto err_usb2phy_init;

	spin_lock_irqsave(&dwc->lock, flags);
	/* Restore the global control register saved at suspend time */
	dwc3_writel(dwc->regs, DWC3_GCTL, dwc->gctl);

	switch (dwc->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
	case USB_DR_MODE_OTG:
		dwc3_gadget_resume(dwc);
		/* FALLTHROUGH */
	case USB_DR_MODE_HOST:
	default:
		/* do nothing */
		break;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Resync runtime PM status with the now-powered hardware */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;

err_usb2phy_init:
	phy_exit(dwc->usb2_generic_phy);
	return ret;
}
/*
 * scsi_bus_resume_common - common resume path for devices on the SCSI bus.
 *
 * For SCSI devices (sdevs), the matching resume/thaw/restore work is run
 * asynchronously in the scsi_sd_pm_domain; non-sdev devices just have
 * their runtime PM status forced to active. Always returns 0.
 */
static int scsi_bus_resume_common(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	async_func_t fn;

	/* Map the requested PM callback to its async worker (sdevs only) */
	if (!scsi_is_sdev_device(dev))
		fn = NULL;
	else if (cb == do_scsi_resume)
		fn = async_sdev_resume;
	else if (cb == do_scsi_thaw)
		fn = async_sdev_thaw;
	else if (cb == do_scsi_restore)
		fn = async_sdev_restore;
	else
		fn = NULL;

	/*
	 * Forcibly set runtime PM status of request queue to "active" to
	 * make sure we can again get requests from the queue (see also
	 * blk_pm_peek_request()).
	 *
	 * The resume hook will correct runtime PM status of the disk.
	 */
	if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
		blk_set_runtime_active(to_scsi_device(dev)->request_queue);

	if (fn) {
		async_schedule_domain(fn, dev, &scsi_sd_pm_domain);

		/*
		 * If a user has disabled async probing a likely reason
		 * is due to a storage enclosure that does not inject
		 * staggered spin-ups. For safety, make resume
		 * synchronous as well in that case.
		 */
		if (strncmp(scsi_scan_type, "async", 5) != 0)
			async_synchronize_full_domain(&scsi_sd_pm_domain);
	} else {
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return 0;
}
/*
 * _omap_device_notifier_call - bus notifier tracking omap_device lifecycle.
 *
 * Builds/deletes the omap_device shadowing a platform device on ADD/REMOVE,
 * idles devices left enabled when their driver unbinds, and records the
 * driver status. Always returns NOTIFY_DONE.
 */
static int _omap_device_notifier_call(struct notifier_block *nb,
				      unsigned long event, void *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_device *od;
	int err;

	switch (event) {
	case BUS_NOTIFY_REMOVED_DEVICE:
		if (pdev->archdata.od)
			omap_device_delete(pdev->archdata.od);
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		od = to_omap_device(pdev);
		if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED)) {
			/* Driver left hardware enabled — idle it ourselves */
			dev_info(dev, "enabled after unload, idling\n");
			err = omap_device_idle(pdev);
			if (err)
				dev_err(dev, "failed to idle\n");
		}
		break;
	case BUS_NOTIFY_BIND_DRIVER:
		od = to_omap_device(pdev);
		/*
		 * Hardware is already enabled but runtime PM thinks it is
		 * suspended — correct the status before the driver binds.
		 */
		if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
		    pm_runtime_status_suspended(dev)) {
			od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
			pm_runtime_set_active(dev);
		}
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		if (pdev->dev.of_node)
			omap_device_build_from_dt(pdev);
		omap_auxdata_legacy_init(dev);
		/* fall through */
	default:
		/* Remember the most recent bus event for this device */
		od = to_omap_device(pdev);
		if (od)
			od->_driver_status = event;
	}

	return NOTIFY_DONE;
}
/*
 * diag_smd_probe - open the DIAG SMD channel matching this platform device
 * (modem, LPASS or WCNSS edge) and enable runtime PM on it.
 *
 * NOTE(review): always returns 0, even when the smd open call failed
 * (r != 0 is only logged) — presumably intentional best-effort behavior;
 * confirm against the SMD framework's retry semantics.
 */
static int diag_smd_probe(struct platform_device *pdev)
{
	int r = 0;
	int index = -1;

	if (pdev->id == SMD_APPS_MODEM) {
		index = MODEM_DATA;
		r = smd_open("DIAG", &driver->smd_data[index].ch,
			     &driver->smd_data[index],
			     diag_smd_notify);
		driver->smd_data[index].ch_save =
			driver->smd_data[index].ch;
	}
#if defined(CONFIG_MSM_N_WAY_SMD)
	if (pdev->id == SMD_APPS_QDSP) {
		index = LPASS_DATA;
		r = smd_named_open_on_edge("DIAG", SMD_APPS_QDSP,
					   &driver->smd_data[index].ch,
					   &driver->smd_data[index],
					   diag_smd_notify);
		driver->smd_data[index].ch_save =
			driver->smd_data[index].ch;
	}
#endif
	if (pdev->id == SMD_APPS_WCNSS) {
		index = WCNSS_DATA;
		r = smd_named_open_on_edge("APPS_RIVA_DATA", SMD_APPS_WCNSS,
					   &driver->smd_data[index].ch,
					   &driver->smd_data[index],
					   diag_smd_notify);
		driver->smd_data[index].ch_save =
			driver->smd_data[index].ch;
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pr_debug("diag: open SMD port, Id = %d, r = %d\n", pdev->id, r);

	return 0;
}
/*
 * usb_hub_create_port_device - allocate and register the device-model
 * object representing hub port @port1 (1-based).
 *
 * Returns 0 on success or a negative errno; on device_register() failure
 * the embedded struct device reference is dropped via put_device(), which
 * is responsible for freeing port_dev.
 */
int usb_hub_create_port_device(struct usb_hub *hub, int port1)
{
	struct usb_port *port_dev = NULL;
	int retval;

	port_dev = kzalloc(sizeof(*port_dev), GFP_KERNEL);
	if (!port_dev) {
		retval = -ENOMEM;
		goto exit;
	}

	hub->ports[port1 - 1] = port_dev;
	port_dev->portnum = port1;
	port_dev->power_is_on = true;
	port_dev->dev.parent = hub->intfdev;
	port_dev->dev.groups = port_dev_group;
	port_dev->dev.type = &usb_port_device_type;
	dev_set_name(&port_dev->dev, "port%d", port1);

	retval = device_register(&port_dev->dev);
	if (retval)
		goto error_register;

	/* Port starts out powered, so mark it runtime-active */
	pm_runtime_set_active(&port_dev->dev);

	/* It would be dangerous if user space couldn't
	 * prevent usb device from being powered off. So don't
	 * enable port runtime pm if failed to expose port's pm qos.
	 */
	if (!dev_pm_qos_expose_flags(&port_dev->dev,
				     PM_QOS_FLAG_NO_POWER_OFF))
		pm_runtime_enable(&port_dev->dev);

	device_enable_async_suspend(&port_dev->dev);
	return 0;

error_register:
	put_device(&port_dev->dev);
exit:
	return retval;
}
/**
 * ufshcd_pltfrm_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Maps the controller registers, fetches the IRQ, enables runtime PM and
 * hands off to ufshcd_init().
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_pltfrm_probe(struct platform_device *pdev)
{
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	struct resource *mem_res;
	int irq, err;
	struct device *dev = &pdev->dev;

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmio_base = devm_ioremap_resource(dev, mem_res);
	if (IS_ERR(mmio_base)) {
		err = PTR_ERR(mmio_base);
		goto out;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "IRQ resource not available\n");
		/*
		 * Propagate the real error code instead of squashing it to
		 * -ENODEV: platform_get_irq() may return -EPROBE_DEFER, and
		 * losing that would break deferred probing.
		 */
		err = irq;
		goto out;
	}

	/* Mark active before enabling runtime PM so no spurious suspend runs */
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	err = ufshcd_init(dev, &hba, mmio_base, irq);
	if (err) {
		dev_err(dev, "Initialization failed\n");
		goto out_disable_rpm;
	}

	platform_set_drvdata(pdev, hba);

	return 0;

out_disable_rpm:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
out:
	return err;
}
/* RPM init */ int i915_rpm_init(struct drm_device *drm_dev) { int ret = 0; struct device *dev = drm_dev->dev; struct drm_i915_private *dev_priv = drm_dev->dev_private; ret = i915_rpm_procfs_init(drm_dev); if (ret) { DRM_ERROR("unable to initialize procfs entry"); } ret = pm_runtime_set_active(dev); dev_priv->rpm.ring_active = false; atomic_set(&dev_priv->rpm.procfs_count, 0); pm_runtime_allow(dev); /* enable Auto Suspend */ pm_runtime_set_autosuspend_delay(dev, RPM_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(dev); if (dev->power.runtime_error) DRM_ERROR("rpm init: error = %d\n", dev->power.runtime_error); return ret; }
/*
 * msm_otg_pm_resume - system (PM) resume handler for the MSM OTG core.
 *
 * Resumes the controller and then syncs the runtime PM status to
 * "active". Returns 0 on success or the errno from msm_otg_resume().
 */
static int msm_otg_pm_resume(struct device *dev)
{
	struct msm_otg *motg = dev_get_drvdata(dev);
	int rc;

	dev_dbg(dev, "OTG PM resume\n");

	rc = msm_otg_resume(motg);
	if (rc)
		return rc;

	/*
	 * Runtime PM Documentation recommends bringing the
	 * device to full powered state upon resume.
	 */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}
/*
 * These are the device model conversion veneers; they convert the
 * device model structures to our more specific structures.
 */
/*
 * amba_probe - bus probe hook: set clock defaults, attach the PM domain,
 * enable the APB clock and runtime PM, then call the driver's probe.
 * On driver-probe failure every acquisition is undone in reverse order.
 * The do { } while (0) acts as a single-exit cleanup scaffold.
 */
static int amba_probe(struct device *dev)
{
	struct amba_device *pcdev = to_amba_device(dev);
	struct amba_driver *pcdrv = to_amba_driver(dev->driver);
	const struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev);
	int ret;

	do {
		ret = of_clk_set_defaults(dev->of_node, false);
		if (ret < 0)
			break;

		ret = dev_pm_domain_attach(dev, true);
		if (ret == -EPROBE_DEFER)
			break;

		ret = amba_get_enable_pclk(pcdev);
		if (ret) {
			dev_pm_domain_detach(dev, true);
			break;
		}

		/* Hold a usage ref so runtime suspend can't race the probe */
		pm_runtime_get_noresume(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);

		ret = pcdrv->probe(pcdev, id);
		if (ret == 0)
			break;

		/* Probe failed — unwind runtime PM, clock and PM domain */
		pm_runtime_disable(dev);
		pm_runtime_set_suspended(dev);
		pm_runtime_put_noidle(dev);

		amba_put_disable_pclk(pcdev);
		dev_pm_domain_detach(dev, true);
	} while (0);

	return ret;
}
/*
 * exynos4x12_isp_clk_probe - register the Exynos4x12 ISP clock provider.
 *
 * Maps the clock controller registers, allocates the suspend register-dump
 * buffer, registers dividers and gates while holding a runtime PM
 * reference, and exposes the provider to DT consumers.
 */
static int __init exynos4x12_isp_clk_probe(struct platform_device *pdev)
{
	struct samsung_clk_provider *ctx;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct resource *res;
	void __iomem *reg_base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(reg_base)) {
		dev_err(dev, "failed to map registers\n");
		return PTR_ERR(reg_base);
	}

	/* Buffer to snapshot ISP clock registers across suspend/resume */
	exynos4x12_save_isp = samsung_clk_alloc_reg_dump(exynos4x12_clk_isp_save,
					ARRAY_SIZE(exynos4x12_clk_isp_save));
	if (!exynos4x12_save_isp)
		return -ENOMEM;

	/* NOTE(review): ctx is not NULL-checked here — confirm
	 * samsung_clk_init() cannot fail/return NULL in this tree. */
	ctx = samsung_clk_init(np, reg_base, CLK_NR_ISP_CLKS);
	ctx->dev = dev;

	platform_set_drvdata(pdev, ctx);

	/* Keep the ISP power domain up while touching its registers */
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	samsung_clk_register_div(ctx, exynos4x12_isp_div_clks,
				 ARRAY_SIZE(exynos4x12_isp_div_clks));
	samsung_clk_register_gate(ctx, exynos4x12_isp_gate_clks,
				  ARRAY_SIZE(exynos4x12_isp_gate_clks));
	samsung_clk_of_add_provider(np, ctx);

	pm_runtime_put(dev);

	return 0;
}
static int intel_lpss_acpi_probe(struct platform_device *pdev) { struct intel_lpss_platform_info *info; const struct acpi_device_id *id; id = acpi_match_device(intel_lpss_acpi_ids, &pdev->dev); if (!id) return -ENODEV; info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); info->irq = platform_get_irq(pdev, 0); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); return intel_lpss_probe(&pdev->dev, info); }
/*
 * sh_cmt_probe - probe the SH CMT timer, which may already have been set
 * up as an "earlytimer" during early boot (drvdata non-NULL in that case).
 *
 * Runtime PM is only enabled for the normal (non-early) probe path.
 */
static int sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	/* Already initialized by the early-platform pass — just finish PM */
	if (p) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_cmt_setup(p, pdev);
	if (ret) {
		kfree(p);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	/* NOTE(review): cfg is dereferenced without a NULL check here —
	 * presumably platform_data is guaranteed by the board code; confirm. */
	if (cfg->clockevent_rating || cfg->clocksource_rating)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
/*
 * msm_hsic_pm_resume - system resume handler for the MSM HSIC host.
 *
 * Drops the IRQ wake enable taken at suspend (when wakeup was armed),
 * resumes the controller, then forces the runtime PM status to "active".
 * Returns 0 on success or the errno from msm_hsic_resume().
 */
static int msm_hsic_pm_resume(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
	int rc;

	dbg_log_event(NULL, "PM Resume", 0);

	if (device_may_wakeup(dev))
		disable_irq_wake(hcd->irq);

	rc = msm_hsic_resume(mehci);
	if (rc)
		return rc;

	/* Bring the device to full powered state upon system resume */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}
/*
 * ehci_msm2_pm_resume - system resume handler for the MSM EHCI host.
 *
 * Undoes the IRQ wake arming from suspend, resumes the controller, and
 * resyncs the runtime PM status to "active". Returns 0 or the errno
 * from msm_ehci_resume().
 */
static int ehci_msm2_pm_resume(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct msm_hcd *mhcd = hcd_to_mhcd(hcd);
	int rc;

	dev_dbg(dev, "ehci-msm2 PM resume\n");

	if (device_may_wakeup(dev))
		disable_irq_wake(hcd->irq);

	rc = msm_ehci_resume(mhcd);
	if (rc)
		return rc;

	/* Bring the device to full powered state upon system resume */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}
/*
 * _od_resume_noirq - noirq-phase resume for an omap_device.
 *
 * If the device was idled by the matching suspend hook, re-enable it,
 * restore the runtime PM "active" status and replay the runtime resume
 * callback before delegating to the generic noirq resume.
 */
static int _od_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_device *od = to_omap_device(pdev);

	if (od->flags & OMAP_DEVICE_SUSPENDED) {
		od->flags &= ~OMAP_DEVICE_SUSPENDED;
		omap_device_enable(pdev);
		/*
		 * XXX: we run before core runtime pm has resumed itself. At
		 * this point in time, we just restore the runtime pm state and
		 * considering symmetric operations in resume, we donot expect
		 * to fail. If we failed, something changed in core runtime_pm
		 * framework OR some device driver messed things up, hence, WARN
		 */
		WARN(pm_runtime_set_active(dev),
		     "Could not set %s runtime state active\n", dev_name(dev));
		pm_generic_runtime_resume(dev);
	}

	return pm_generic_resume_noirq(dev);
}
/*
 * process_sbl_transition - handle the MHI transition into the SBL
 * (secondary boot loader) execution environment.
 *
 * Configures autosuspend and enables runtime PM on the MHI platform
 * device, publishes the new exec environment, and notifies clients.
 * Always returns MHI_STATUS_SUCCESS (a set_active failure is only logged).
 */
static enum MHI_STATUS process_sbl_transition(
				struct mhi_device_ctxt *mhi_dev_ctxt,
				enum STATE_TRANSITION cur_work_item)
{
	int r;

	mhi_log(MHI_MSG_INFO, "Processing SBL state transition\n");
	pm_runtime_set_autosuspend_delay(
				&mhi_dev_ctxt->dev_info->plat_dev->dev,
				MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
	pm_runtime_use_autosuspend(&mhi_dev_ctxt->dev_info->plat_dev->dev);
	r = pm_runtime_set_active(&mhi_dev_ctxt->dev_info->plat_dev->dev);
	if (r) {
		mhi_log(MHI_MSG_ERROR,
			"Failed to activate runtime pm ret %d\n", r);
	}
	pm_runtime_enable(&mhi_dev_ctxt->dev_info->plat_dev->dev);
	mhi_log(MHI_MSG_INFO, "Enabled runtime pm\n");
	mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
	/* Make the exec-env store visible before clients are enabled */
	wmb();
	enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
	return MHI_STATUS_SUCCESS;
}
/*
 * mxhci_hsic_pm_resume - system resume handler for the MSM xHCI HSIC host.
 *
 * Leaves the link in low-power mode unless someone actually needs the
 * bus; otherwise resumes the controller and forces the runtime PM status
 * to "active". Returns 0 or the errno from mxhci_hsic_resume().
 */
static int mxhci_hsic_pm_resume(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct mxhci_hsic_hcd *mxhci = hcd_to_hsic(hcd);
	unsigned long flags;
	int ret;

	dev_dbg(dev, "xhci-msm PM resume\n");
	xhci_dbg_log_event(&dbg_hsic, NULL, "PM Resume", 0);

	if (device_may_wakeup(dev))
		disable_irq_wake(hcd->irq);

	/*
	 * Keep HSIC in Low Power Mode if system is resumed
	 * by any other wakeup source. HSIC is resumed later
	 * when remote wakeup is received or interface driver
	 * start I/O.
	 */
	spin_lock_irqsave(&mxhci->wakeup_lock, flags);
	if (!mxhci->pm_usage_cnt && pm_runtime_suspended(dev)) {
		spin_unlock_irqrestore(&mxhci->wakeup_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&mxhci->wakeup_lock, flags);

	ret = mxhci_hsic_resume(mxhci);
	if (ret)
		return ret;

	/* Bring the device to full powered state upon system resume */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}
/*
 * scsi_bus_resume_common - resume a device on the SCSI bus.
 *
 * For sdev devices the parent is pinned awake while the type-specific
 * resume runs; on success the device's runtime PM status is forced back
 * to "active". Returns 0 or the errno from scsi_dev_type_resume().
 */
static int scsi_bus_resume_common(struct device *dev)
{
	int ret = 0;

	if (scsi_is_sdev_device(dev)) {
		/*
		 * Parent device may have runtime suspended as soon as
		 * it is woken up during the system resume.
		 *
		 * Resume it on behalf of child.
		 */
		pm_runtime_get_sync(dev->parent);
		ret = scsi_dev_type_resume(dev);
		pm_runtime_put_sync(dev->parent);
	}

	if (!ret) {
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return ret;
}
/*
 * s5p_ehci_resume - system resume handler for the S5P EHCI host.
 *
 * Re-enables clocks, reattaches the OTG host, reinitializes the PHY
 * (taking a PHY runtime PM reference only if one wasn't already held),
 * re-enables DMA bursting and resumes the EHCI core. Always returns 0.
 */
static int s5p_ehci_resume(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct s5p_ehci_hcd *s5p_ehci = to_s5p_ehci(hcd);
	struct platform_device *pdev = to_platform_device(dev);

	s5p_ehci_clk_prepare_enable(s5p_ehci);

	if (s5p_ehci->otg)
		s5p_ehci->otg->set_host(s5p_ehci->otg, &hcd->self);

	if (s5p_ehci->phy) {
		usb_phy_init(s5p_ehci->phy);
		s5p_ehci->post_lpa_resume = 0;
		/*
		 * We are going to change runtime status to active.
		 * Make sure we get the phy only if we didn't get it before.
		 */
		if (pm_runtime_suspended(dev))
			pm_runtime_get_sync(s5p_ehci->phy->dev);
	} else if (s5p_ehci->pdata->phy_init) {
		s5p_ehci->pdata->phy_init(pdev, USB_PHY_TYPE_HOST);
	}

	/* DMA burst Enable */
	writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));

	ehci_resume(hcd, false);

	/* Update runtime PM status and clear runtime_error */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}
/*
 * dwc3_resume - system resume handler (MSM variant of the DWC3 core).
 *
 * Skips core handling entirely when the platform glue driver owns PM,
 * otherwise reinitializes the PHYs, restores GCTL, resumes the gadget
 * in device/DRD modes and forces the runtime PM status to "active".
 * Always returns 0.
 */
static int dwc3_resume(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);
	unsigned long flags;

	/* Check if platform glue driver handling PM, if not then handle here */
	if (!dwc3_notify_event(dwc, DWC3_CORE_PM_RESUME_EVENT))
		return 0;

	usb_phy_init(dwc->usb3_phy);
	usb_phy_init(dwc->usb2_phy);
	/* NOTE(review): 100 ms settle delay after PHY init — presumably a
	 * hardware requirement; source of the value not visible here. */
	msleep(100);

	spin_lock_irqsave(&dwc->lock, flags);
	dwc3_writel(dwc->regs, DWC3_GCTL, dwc->gctl);

	switch (dwc->mode) {
	case DWC3_MODE_DEVICE:
	case DWC3_MODE_DRD:
		dwc3_gadget_resume(dwc);
		/* FALLTHROUGH */
	case DWC3_MODE_HOST:
	default:
		/* do nothing */
		break;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Resync runtime PM status with the now-powered hardware */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}
/*
 * sh_tmu_probe - probe the SH TMU timer, which may already have been set
 * up as an "earlytimer" during early boot (drvdata non-NULL in that case).
 *
 * Runtime PM is only enabled for the normal (non-early) probe path.
 */
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	/* Already initialized by the early-platform pass — just finish PM */
	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	/* A timer driving clockevents/clocksource must be IRQ-safe for PM */
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
/*
 * Register a new MMC card with the driver model.
 *
 * Names the device, logs a human-readable description of the card type
 * and speed mode, optionally enables core runtime PM for the card, adds
 * the device and (again optionally) exposes a runtime_pm_timeout sysfs
 * attribute. Returns 0 on success or the errno from device_add().
 */
int mmc_add_card(struct mmc_card *card)
{
	int ret;
	const char *type;
	const char *uhs_bus_speed_mode = "";
	static const char *const uhs_speeds[] = {
		[UHS_SDR12_BUS_SPEED] = "SDR12 ",
		[UHS_SDR25_BUS_SPEED] = "SDR25 ",
		[UHS_SDR50_BUS_SPEED] = "SDR50 ",
		[UHS_SDR104_BUS_SPEED] = "SDR104 ",
		[UHS_DDR50_BUS_SPEED] = "DDR50 ",
	};

	dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host),
		     card->rca);

	/* Derive a printable card-type string for the log line below */
	switch (card->type) {
	case MMC_TYPE_MMC:
		type = "MMC";
		break;
	case MMC_TYPE_SD:
		type = "SD";
		if (mmc_card_blockaddr(card)) {
			if (mmc_card_ext_capacity(card))
				type = "SDXC";
			else
				type = "SDHC";
		}
		break;
	case MMC_TYPE_SDIO:
		type = "SDIO";
		break;
	case MMC_TYPE_SD_COMBO:
		type = "SD-combo";
		if (mmc_card_blockaddr(card))
			type = "SDHC-combo";
		break;
	default:
		type = "?";
		break;
	}

	if (mmc_sd_card_uhs(card) &&
	    (card->sd_bus_speed < ARRAY_SIZE(uhs_speeds)))
		uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed];

	if (mmc_host_is_spi(card->host)) {
		pr_info("%s: new %s%s%s card on SPI\n",
			mmc_hostname(card->host),
			mmc_card_highspeed(card) ? "high speed " : "",
			mmc_card_ddr_mode(card) ? "DDR " : "",
			type);
	} else {
		pr_info("%s: new %s%s%s%s%s card at address %04x\n",
			mmc_hostname(card->host),
			mmc_card_uhs(card) ? "ultra high speed " :
			(mmc_card_highspeed(card) ? "high speed " : ""),
			(mmc_card_hs200(card) ? "HS200 " : ""),
			mmc_card_ddr_mode(card) ? "DDR " : "",
			uhs_bus_speed_mode, type, card->rca);
	}

#ifdef CONFIG_DEBUG_FS
	mmc_add_card_debugfs(card);
#endif
	mmc_init_context_info(card->host);

	/* Enable runtime PM before device_add() so it is live immediately;
	 * a set_active failure is logged and runtime PM simply not enabled. */
	if (mmc_use_core_runtime_pm(card->host)) {
		ret = pm_runtime_set_active(&card->dev);
		if (ret)
			pr_err("%s: %s: failed setting runtime active: ret: %d\n",
			       mmc_hostname(card->host), __func__, ret);
		else
			pm_runtime_enable(&card->dev);
	}

	ret = device_add(&card->dev);
	if (ret)
		return ret;

	if (mmc_use_core_runtime_pm(card->host)) {
		/* Expose a writable autosuspend timeout via sysfs */
		card->rpm_attrib.show = show_rpm_delay;
		card->rpm_attrib.store = store_rpm_delay;
		sysfs_attr_init(&card->rpm_attrib.attr);
		card->rpm_attrib.attr.name = "runtime_pm_timeout";
		card->rpm_attrib.attr.mode = S_IRUGO | S_IWUSR;
		ret = device_create_file(&card->dev, &card->rpm_attrib);
		if (ret)
			pr_err("%s: %s: creating runtime pm sysfs entry: failed: %d\n",
			       mmc_hostname(card->host), __func__, ret);
		/* Default timeout is 10 seconds */
		card->idle_timeout = RUNTIME_SUSPEND_DELAY_MS;
	}

	mmc_card_set_present(card);

	return 0;
}
/*
 * apds990x_probe - I2C probe for the APDS990x ALS/proximity sensor.
 *
 * Allocates driver state, validates mandatory platform data, computes
 * chip/inverse-chip calibration factors, powers the part via regulators,
 * detects it, enables runtime PM, creates the sysfs group and requests
 * the threaded IRQ. Errors unwind via the fail1..fail5 labels.
 */
static int __devinit apds990x_probe(struct i2c_client *client,
				    const struct i2c_device_id *id)
{
	struct apds990x_chip *chip;
	int err;

	chip = kzalloc(sizeof *chip, GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	i2c_set_clientdata(client, chip);
	chip->client = client;

	init_waitqueue_head(&chip->wait);
	mutex_init(&chip->mutex);
	chip->pdata = client->dev.platform_data;

	if (chip->pdata == NULL) {
		dev_err(&client->dev, "platform data is mandatory\n");
		err = -EINVAL;
		goto fail1;
	}

	if (chip->pdata->cf.ga == 0) {
		/* set uncovered sensor default parameters */
		chip->cf.ga = 1966; /* 0.48 * APDS_PARAM_SCALE */
		chip->cf.cf1 = 4096; /* 1.00 * APDS_PARAM_SCALE */
		chip->cf.irf1 = 9134; /* 2.23 * APDS_PARAM_SCALE */
		chip->cf.cf2 = 2867; /* 0.70 * APDS_PARAM_SCALE */
		chip->cf.irf2 = 5816; /* 1.42 * APDS_PARAM_SCALE */
		chip->cf.df = 52;
	} else {
		chip->cf = chip->pdata->cf;
	}

	/* precalculate inverse chip factors for threshold control */
	chip->rcf.afactor =
		(chip->cf.irf1 - chip->cf.irf2) * APDS_PARAM_SCALE /
		(chip->cf.cf1 - chip->cf.cf2);
	chip->rcf.cf1 = APDS_PARAM_SCALE * APDS_PARAM_SCALE /
		chip->cf.cf1;
	chip->rcf.irf1 = chip->cf.irf1 * APDS_PARAM_SCALE /
		chip->cf.cf1;
	chip->rcf.cf2 = APDS_PARAM_SCALE * APDS_PARAM_SCALE /
		chip->cf.cf2;
	chip->rcf.irf2 = chip->cf.irf2 * APDS_PARAM_SCALE /
		chip->cf.cf2;

	/* Set something to start with */
	chip->lux_thres_hi = APDS_LUX_DEF_THRES_HI;
	chip->lux_thres_lo = APDS_LUX_DEF_THRES_LO;
	chip->lux_calib = APDS_LUX_NEUTRAL_CALIB_VALUE;

	chip->prox_thres = APDS_PROX_DEF_THRES;
	chip->pdrive = chip->pdata->pdrive;
	chip->pdiode = APDS_PDIODE_IR;
	chip->pgain = APDS_PGAIN_1X;
	chip->prox_calib = APDS_PROX_NEUTRAL_CALIB_VALUE;
	chip->prox_persistence = APDS_DEFAULT_PROX_PERS;
	chip->prox_continuous_mode = false;

	chip->regs[0].supply = reg_vcc;
	chip->regs[1].supply = reg_vled;

	err = regulator_bulk_get(&client->dev,
				 ARRAY_SIZE(chip->regs), chip->regs);
	if (err < 0) {
		dev_err(&client->dev, "Cannot get regulators\n");
		goto fail1;
	}

	err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), chip->regs);
	if (err < 0) {
		dev_err(&client->dev, "Cannot enable regulators\n");
		goto fail2;
	}

	/* Give the part time to power up before probing it */
	usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);

	err = apds990x_detect(chip);
	if (err < 0) {
		dev_err(&client->dev, "APDS990X not found\n");
		goto fail3;
	}

	pm_runtime_set_active(&client->dev);

	apds990x_configure(chip);
	apds990x_set_arate(chip, APDS_LUX_DEFAULT_RATE);
	apds990x_mode_on(chip);

	pm_runtime_enable(&client->dev);

	/* NOTE(review): failures from here on jump to fail3..fail5 without
	 * disabling the runtime PM just enabled above — confirm whether a
	 * pm_runtime_disable() is needed on these unwind paths. */
	if (chip->pdata->setup_resources) {
		err = chip->pdata->setup_resources();
		if (err) {
			err = -EINVAL;
			goto fail3;
		}
	}

	err = sysfs_create_group(&chip->client->dev.kobj,
				 apds990x_attribute_group);
	if (err < 0) {
		dev_err(&chip->client->dev, "Sysfs registration failed\n");
		goto fail4;
	}

	err = request_threaded_irq(client->irq, NULL,
				   apds990x_irq,
				   IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW |
				   IRQF_ONESHOT,
				   "apds990x", chip);
	if (err) {
		dev_err(&client->dev, "could not get IRQ %d\n",
			client->irq);
		goto fail5;
	}
	return err;
fail5:
	sysfs_remove_group(&chip->client->dev.kobj,
			   &apds990x_attribute_group[0]);
fail4:
	if (chip->pdata && chip->pdata->release_resources)
		chip->pdata->release_resources();
fail3:
	regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
fail2:
	regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
fail1:
	kfree(chip);
	return err;
}
/*
 * pcm512x_probe - common probe for the PCM512x codec (shared by the I2C
 * and SPI front-ends, which supply the regmap).
 *
 * Acquires and enables supplies, resets the chip (doubling as an I/O
 * sanity check), optionally enables SCLK, drops the chip into standby,
 * enables runtime PM, parses PLL routing from DT and registers the codec.
 */
int pcm512x_probe(struct device *dev, struct regmap *regmap)
{
	struct pcm512x_priv *pcm512x;
	int i, ret;

	pcm512x = devm_kzalloc(dev, sizeof(struct pcm512x_priv), GFP_KERNEL);
	if (!pcm512x)
		return -ENOMEM;

	dev_set_drvdata(dev, pcm512x);
	pcm512x->regmap = regmap;

	for (i = 0; i < ARRAY_SIZE(pcm512x->supplies); i++)
		pcm512x->supplies[i].supply = pcm512x_supply_names[i];

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(pcm512x->supplies),
				      pcm512x->supplies);
	if (ret != 0) {
		dev_err(dev, "Failed to get supplies: %d\n", ret);
		return ret;
	}

	pcm512x->supply_nb[0].notifier_call = pcm512x_regulator_event_0;
	pcm512x->supply_nb[1].notifier_call = pcm512x_regulator_event_1;
	pcm512x->supply_nb[2].notifier_call = pcm512x_regulator_event_2;

	/* Notifier registration failures are logged but non-fatal */
	for (i = 0; i < ARRAY_SIZE(pcm512x->supplies); i++) {
		ret = regulator_register_notifier(pcm512x->supplies[i].consumer,
						  &pcm512x->supply_nb[i]);
		if (ret != 0) {
			dev_err(dev,
				"Failed to register regulator notifier: %d\n",
				ret);
		}
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(pcm512x->supplies),
				    pcm512x->supplies);
	if (ret != 0) {
		dev_err(dev, "Failed to enable supplies: %d\n", ret);
		return ret;
	}

	/* Reset the device, verifying I/O in the process for I2C */
	ret = regmap_write(regmap, PCM512x_RESET,
			   PCM512x_RSTM | PCM512x_RSTR);
	if (ret != 0) {
		dev_err(dev, "Failed to reset device: %d\n", ret);
		goto err;
	}

	ret = regmap_write(regmap, PCM512x_RESET, 0);
	if (ret != 0) {
		dev_err(dev, "Failed to reset device: %d\n", ret);
		goto err;
	}

	pcm512x->sclk = devm_clk_get(dev, NULL);
	/* NOTE(review): this early return skips the `err` cleanup path,
	 * leaving supplies enabled on -EPROBE_DEFER — looks like it should
	 * be `goto err`; confirm against later upstream revisions. */
	if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (!IS_ERR(pcm512x->sclk)) {
		ret = clk_prepare_enable(pcm512x->sclk);
		if (ret != 0) {
			dev_err(dev, "Failed to enable SCLK: %d\n", ret);
			return ret;
		}
	}

	/* Default to standby mode */
	ret = regmap_update_bits(pcm512x->regmap, PCM512x_POWER,
				 PCM512x_RQST, PCM512x_RQST);
	if (ret != 0) {
		dev_err(dev, "Failed to request standby: %d\n", ret);
		goto err_clk;
	}

	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_idle(dev);

#ifdef CONFIG_OF
	if (dev->of_node) {
		const struct device_node *np = dev->of_node;
		u32 val;

		if (of_property_read_u32(np, "pll-in", &val) >= 0) {
			if (val > 6) {
				dev_err(dev, "Invalid pll-in\n");
				ret = -EINVAL;
				goto err_clk;
			}
			pcm512x->pll_in = val;
		}

		if (of_property_read_u32(np, "pll-out", &val) >= 0) {
			if (val > 6) {
				dev_err(dev, "Invalid pll-out\n");
				ret = -EINVAL;
				goto err_clk;
			}
			pcm512x->pll_out = val;
		}

		/* pll-in and pll-out must be specified together (or not
		 * at all) and must differ */
		if (!pcm512x->pll_in != !pcm512x->pll_out) {
			dev_err(dev,
				"Error: both pll-in and pll-out, or none\n");
			ret = -EINVAL;
			goto err_clk;
		}
		if (pcm512x->pll_in && pcm512x->pll_in == pcm512x->pll_out) {
			dev_err(dev, "Error: pll-in == pll-out\n");
			ret = -EINVAL;
			goto err_clk;
		}
	}
#endif

	ret = snd_soc_register_codec(dev, &pcm512x_codec_driver,
				     &pcm512x_dai, 1);
	if (ret != 0) {
		dev_err(dev, "Failed to register CODEC: %d\n", ret);
		goto err_pm;
	}

	return 0;

err_pm:
	pm_runtime_disable(dev);
err_clk:
	if (!IS_ERR(pcm512x->sclk))
		clk_disable_unprepare(pcm512x->sclk);
err:
	regulator_bulk_disable(ARRAY_SIZE(pcm512x->supplies),
			       pcm512x->supplies);
	return ret;
}
/*
 * dw8250_probe - probe a DesignWare 8250 UART platform device.
 *
 * Maps the register window, wires up the DW-specific accessors/IRQ
 * handler, applies OF or ACPI specific setup, registers the 8250 port
 * and enables runtime PM. Returns 0 or a negative errno.
 */
static int dw8250_probe(struct platform_device *pdev)
{
	struct uart_8250_port uart = {};
	struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	struct dw8250_data *data;
	int err;

	if (!regs || !irq) {
		dev_err(&pdev->dev, "no registers/irq defined\n");
		return -EINVAL;
	}

	spin_lock_init(&uart.port.lock);
	uart.port.mapbase = regs->start;
	uart.port.irq = irq->start;
	uart.port.handle_irq = dw8250_handle_irq;
	uart.port.pm = dw8250_do_pm;
	uart.port.type = PORT_8250;
	uart.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT;
	uart.port.dev = &pdev->dev;

	uart.port.membase = devm_ioremap(&pdev->dev, regs->start,
					 resource_size(regs));
	if (!uart.port.membase)
		return -ENOMEM;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Clock is optional; UART clock rate comes from it when present */
	data->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(data->clk)) {
		/* NOTE(review): clk is left prepared/enabled on later error
		 * returns in this function — confirm teardown expectations. */
		clk_prepare_enable(data->clk);
		uart.port.uartclk = clk_get_rate(data->clk);
	}

	uart.port.iotype = UPIO_MEM;
	uart.port.serial_in = dw8250_serial_in;
	uart.port.serial_out = dw8250_serial_out;
	uart.port.private_data = data;

	dw8250_setup_port(&uart);

	if (pdev->dev.of_node) {
		err = dw8250_probe_of(&uart.port);
		if (err)
			return err;
	} else if (ACPI_HANDLE(&pdev->dev)) {
		err = dw8250_probe_acpi(&uart);
		if (err)
			return err;
	} else {
		return -ENODEV;
	}

	data->line = serial8250_register_8250_port(&uart);
	if (data->line < 0)
		return data->line;

	platform_set_drvdata(pdev, data);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;
}
static int bcm_hsotgctrl_probe(struct platform_device *pdev) { int error = 0; unsigned int val; struct bcm_hsotgctrl_drv_data *hsotgctrl_drvdata; struct bcm_hsotgctrl_platform_data *plat_data = NULL; if (pdev->dev.platform_data) plat_data = (struct bcm_hsotgctrl_platform_data *) pdev->dev.platform_data; else if (pdev->dev.of_node) { int val; struct resource *resource; plat_data = kzalloc(sizeof(struct bcm_hsotgctrl_platform_data), GFP_KERNEL); if (!plat_data) { dev_err(&pdev->dev, "%s: memory allocation failed.", __func__); error = -ENOMEM; goto err_ret; } resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (resource->start) plat_data->hsotgctrl_virtual_mem_base = HW_IO_PHYS_TO_VIRT(resource->start); else { pr_info("Invalid hsotgctrl_virtual_mem_basei from DT\n"); goto err_read; } if (of_property_read_u32(pdev->dev.of_node, "chipreg-virtual-mem-base", &val)) { error = -EINVAL; dev_err(&pdev->dev, "chipreg-virtual-mem-base read failed\n"); goto err_read; } plat_data->chipreg_virtual_mem_base = HW_IO_PHYS_TO_VIRT(val); resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (resource->start) plat_data->irq = resource->start; else { pr_info("Invalid irq from DT\n"); goto err_read; } if (of_property_read_string(pdev->dev.of_node, "usb-ahb-clk-name", &plat_data->usb_ahb_clk_name) != 0) { error = -EINVAL; dev_err(&pdev->dev, "usb-ahb-clk-name read failed\n"); goto err_read; } if (of_property_read_string(pdev->dev.of_node, "mdio-mstr-clk-name", &plat_data->mdio_mstr_clk_name) != 0) { error = -EINVAL; dev_err(&pdev->dev, "mdio-mstr-clk-name read failed\n"); goto err_read; } } if (plat_data == NULL) { dev_err(&pdev->dev, "platform_data failed\n"); return -ENODEV; } hsotgctrl_drvdata = kzalloc(sizeof(*hsotgctrl_drvdata), GFP_KERNEL); if (!hsotgctrl_drvdata) { dev_warn(&pdev->dev, "Memory allocation failed\n"); return -ENOMEM; } local_hsotgctrl_handle = hsotgctrl_drvdata; hsotgctrl_drvdata->hsotg_ctrl_base = (void *)plat_data->hsotgctrl_virtual_mem_base; if 
(!hsotgctrl_drvdata->hsotg_ctrl_base) { dev_warn(&pdev->dev, "No vaddr for HSOTGCTRL!\n"); goto error_get_vaddr; } hsotgctrl_drvdata->chipregs_base = (void *)plat_data->chipreg_virtual_mem_base; if (!hsotgctrl_drvdata->chipregs_base) { dev_warn(&pdev->dev, "No vaddr for CHIPREG!\n"); goto error_get_vaddr; } hsotgctrl_drvdata->dev = &pdev->dev; hsotgctrl_drvdata->otg_clk = clk_get(NULL, plat_data->usb_ahb_clk_name); if (IS_ERR(hsotgctrl_drvdata->otg_clk)) { error = PTR_ERR(hsotgctrl_drvdata->otg_clk); dev_warn(&pdev->dev, "OTG clock allocation failed - %d\n", error); goto error_get_otg_clk; } hsotgctrl_drvdata->mdio_master_clk = clk_get(NULL, plat_data->mdio_mstr_clk_name); if (IS_ERR(hsotgctrl_drvdata->mdio_master_clk)) { error = PTR_ERR(hsotgctrl_drvdata->mdio_master_clk); dev_warn(&pdev->dev, "MDIO Mst clk alloc failed - %d\n", error); goto error_get_master_clk; } hsotgctrl_drvdata->allow_suspend = true; platform_set_drvdata(pdev, hsotgctrl_drvdata); bcm_hsotgctrl_en_clock(true); mdelay(HSOTGCTRL_STEP_DELAY_IN_MS); /* clear bit 15 RDB error */ val = readl(hsotgctrl_drvdata->hsotg_ctrl_base + HSOTG_CTRL_PHY_P1CTL_OFFSET); val &= ~HSOTG_CTRL_PHY_P1CTL_USB11_OEB_IS_TXEB_MASK; writel(val, hsotgctrl_drvdata->hsotg_ctrl_base + HSOTG_CTRL_PHY_P1CTL_OFFSET); mdelay(HSOTGCTRL_STEP_DELAY_IN_MS); /* S/W reset Phy, active low */ val = readl(hsotgctrl_drvdata->hsotg_ctrl_base + HSOTG_CTRL_PHY_P1CTL_OFFSET); val &= ~HSOTG_CTRL_PHY_P1CTL_SOFT_RESET_MASK; writel(val, hsotgctrl_drvdata->hsotg_ctrl_base + HSOTG_CTRL_PHY_P1CTL_OFFSET); mdelay(HSOTGCTRL_STEP_DELAY_IN_MS); /* bring Phy out of reset */ val = readl(hsotgctrl_drvdata->hsotg_ctrl_base + HSOTG_CTRL_PHY_P1CTL_OFFSET); val &= ~HSOTG_CTRL_PHY_P1CTL_PHY_MODE_MASK; val |= HSOTG_CTRL_PHY_P1CTL_SOFT_RESET_MASK; /* use OTG mode */ val |= PHY_MODE_OTG << HSOTG_CTRL_PHY_P1CTL_PHY_MODE_SHIFT; writel(val, hsotgctrl_drvdata->hsotg_ctrl_base + HSOTG_CTRL_PHY_P1CTL_OFFSET); mdelay(HSOTGCTRL_STEP_DELAY_IN_MS); /* Enable pad, internal PLL 
etc */ bcm_hsotgctrl_set_phy_off(false); mdelay(HSOTGCTRL_STEP_DELAY_IN_MS); /*Come up as device until we check PMU ID status * to avoid turning on Vbus before checking */ val = HSOTG_CTRL_USBOTGCONTROL_OTGSTAT_CTRL_MASK | HSOTG_CTRL_USBOTGCONTROL_UTMIOTG_IDDIG_SW_MASK | HSOTG_CTRL_USBOTGCONTROL_USB_HCLK_EN_DIRECT_MASK | HSOTG_CTRL_USBOTGCONTROL_USB_ON_IS_HCLK_EN_MASK | HSOTG_CTRL_USBOTGCONTROL_USB_ON_MASK | HSOTG_CTRL_USBOTGCONTROL_PRST_N_SW_MASK | HSOTG_CTRL_USBOTGCONTROL_HRESET_N_SW_MASK; writel(val, hsotgctrl_drvdata->hsotg_ctrl_base + HSOTG_CTRL_USBOTGCONTROL_OFFSET); mdelay(HSOTGCTRL_STEP_DELAY_IN_MS); error = device_create_file(&pdev->dev, &dev_attr_hsotgctrldump); if (error) { dev_warn(&pdev->dev, "Failed to create HOST file\n"); goto Error_bcm_hsotgctrl_probe; } #ifndef CONFIG_USB_OTG_UTILS /* Clear non-driving as default in case there * is no transceiver hookup */ bcm_hsotgctrl_phy_set_non_driving(false); #endif #ifdef CONFIG_NOP_USB_XCEIV /* Clear non-driving as default in case there * is no transceiver hookup */ bcm_hsotgctrl_phy_set_non_driving(false); #endif pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); hsotgctrl_drvdata->hsotgctrl_irq = platform_get_irq(pdev, 0); /* Create a work queue for wakeup work items */ hsotgctrl_drvdata->bcm_hsotgctrl_work_queue = create_workqueue("bcm_hsotgctrl_events"); if (hsotgctrl_drvdata->bcm_hsotgctrl_work_queue == NULL) { dev_warn(&pdev->dev, "BCM HSOTGCTRL events work queue creation failed\n"); /* Treat this as non-fatal error */ } INIT_DELAYED_WORK(&hsotgctrl_drvdata->wakeup_work, bcm_hsotgctrl_delayed_wakeup_handler); /* disable Bvalid interrupt bit * This interrupt is not currently used as the STAT2 detection * happens from the PMU side. Beacsue of not clearing this bit * Master clock gating feature was not working in Java. 
This * is not a issue in case of Hawaii * */ val = readl(hsotgctrl_drvdata->hsotg_ctrl_base + HSOTG_CTRL_USBOTGCONTROL_OFFSET); val |= 1 << HSOTG_CTRL_USBOTGCONTROL_BVALID_CLR_SHIFT; writel(val, hsotgctrl_drvdata->hsotg_ctrl_base + HSOTG_CTRL_USBOTGCONTROL_OFFSET); bcm_hsotgctrl_en_clock(false); /* request_irq enables irq */ hsotgctrl_drvdata->irq_enabled = true; error = request_irq(hsotgctrl_drvdata->hsotgctrl_irq, bcm_hsotgctrl_wake_irq, IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND, "bcm_hsotgctrl", (void *)hsotgctrl_drvdata); if (error) { hsotgctrl_drvdata->irq_enabled = false; hsotgctrl_drvdata->hsotgctrl_irq = 0; dev_warn(&pdev->dev, "Failed to request IRQ for wakeup\n"); } return 0; Error_bcm_hsotgctrl_probe: clk_put(hsotgctrl_drvdata->mdio_master_clk); bcm_hsotgctrl_en_clock(false); error_get_master_clk: clk_put(hsotgctrl_drvdata->otg_clk); error_get_otg_clk: error_get_vaddr: kfree(hsotgctrl_drvdata); err_read: if (pdev->dev.of_node) kfree(plat_data); err_ret: pr_err("%s probe failed\n", __func__); return error; }