static int sdhci_pm_suspend(struct device *dev) { int retval = 0; unsigned long flags; struct platform_device *pdev = container_of(dev, struct platform_device, dev); struct sdhci_host *host = platform_get_drvdata(pdev); struct mmc_host *mmc = host->mmc; #ifdef CONFIG_PM_RUNTIME if(pm_runtime_enabled(dev)) retval = pm_runtime_get_sync(dev); #endif if(retval >= 0) { retval = sdhci_suspend_host(host, PMSG_SUSPEND); if(!retval) { #ifdef CONFIG_MMC_HOST_WAKEUP_SUPPORTED if( !strcmp(host->hw_name, "Spread SDIO host1") ) { sdhci_host_wakeup_set(host); } #endif spin_lock_irqsave(&host->lock, flags); if(host->ops->set_clock) host->ops->set_clock(host, 0); spin_unlock_irqrestore(&host->lock, flags); } else { #ifdef CONFIG_PM_RUNTIME if(pm_runtime_enabled(dev)) pm_runtime_put_autosuspend(dev); #endif } } return retval; }
static int kbase_rk_power_on_callback(struct kbase_device *kbdev) { struct kbase_rk *kbase_rk = kbdev->platform_context; int ret = 1; /* Assume GPU has been powered off */ int error; if (kbase_rk->is_powered) { dev_warn(kbdev->dev, "called %s for already powered device\n", __func__); return 0; } dev_dbg(kbdev->dev, "%s: powering on\n", __func__); if (pm_runtime_enabled(kbdev->dev)) { error = pm_runtime_get_sync(kbdev->dev); if (error < 0) { dev_err(kbdev->dev, "failed to runtime resume device: %d\n", error); return error; } else if (error == 1) { /* * Let core know that the chip has not been * powered off, so we can save on re-initialization. */ ret = 0; } } else { error = kbase_rk_rt_power_on_callback(kbdev); if (error) return error; } error = clk_enable(kbase_rk->clk); if (error) { dev_err(kbdev->dev, "failed to enable clock: %d\n", error); if (pm_runtime_enabled(kbdev->dev)) pm_runtime_put_sync(kbdev->dev); else kbase_rk_rt_power_off_callback(kbdev); return error; } kbase_rk->is_powered = true; KBASE_TIMELINE_GPU_POWER(kbdev, 1); return ret; }
/**
 * __pm_generic_resume - Generic resume/restore callback for subsystems.
 * @dev: Device to handle.
 * @event: PM transition of the system under way.
 * @noirq: Whether or not this is the "noirq" stage.
 *
 * Execute the resume/restore callback provided by the @dev's driver, if
 * defined. If it returns 0, change the device's runtime PM status to 'active'.
 * Return the callback's error code.
 */
static int __pm_generic_resume(struct device *dev, int event, bool noirq)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int (*callback)(struct device *);
	int ret;

	if (!pm)
		return 0;

	switch (event) {
	case PM_EVENT_RESUME:
		callback = noirq ? pm->resume_noirq : pm->resume;
		break;
	case PM_EVENT_RESTORE:
		callback = noirq ? pm->restore_noirq : pm->restore;
		break;
	default:
		callback = NULL;
		break;
	}

	if (!callback)
		return 0;

	ret = callback(dev);

	if (!ret && !noirq && pm_runtime_enabled(dev)) {
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return ret;
}
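/*
 * Illustrative sketch (not taken from the sources above): a driver-level
 * system-resume callback that re-synchronizes the runtime PM status the
 * same way __pm_generic_resume() does once the hardware is back up.
 * foo_hw_reinit() is a hypothetical helper; assumes <linux/pm_runtime.h>.
 */
static int foo_resume(struct device *dev)
{
	int ret;

	ret = foo_hw_reinit(dev);	/* hypothetical hardware re-init */
	if (ret)
		return ret;

	if (pm_runtime_enabled(dev)) {
		/* Runtime PM still thinks the device is suspended; fix that. */
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return 0;
}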
static int wl1271_sdio_power_on(struct wl1271 *wl)
{
	struct sdio_func *func = wl_to_func(wl);
	int ret;

	/* If enabled, tell runtime PM not to power off the card */
	if (pm_runtime_enabled(&func->dev)) {
		/*
		 * pm_runtime_get_sync() may return 1 when the device was
		 * already active, so only treat negative values as errors.
		 */
		ret = pm_runtime_get_sync(&func->dev);
		if (ret < 0) {
			printk(KERN_ERR "%s: pm_runtime_get_sync: %d\n",
			       __func__, ret);
			goto out;
		}
	} else {
		/* Runtime PM is disabled: power up the card manually */
		ret = mmc_power_restore_host(func->card->host);
		if (ret < 0) {
			printk(KERN_ERR "%s: mmc_power_restore_host: %d\n",
			       __func__, ret);
			goto out;
		}
	}

	sdio_claim_host(func);
	sdio_enable_func(func);

out:
	return ret;
}
static int sirf_usp_pcm_remove(struct platform_device *pdev)
{
	if (!pm_runtime_enabled(&pdev->dev))
		sirf_usp_pcm_runtime_suspend(&pdev->dev);
	else
		pm_runtime_disable(&pdev->dev);

	return 0;
}
void i915_rpm_enable(struct device *dev)
{
	int cur_status = pm_runtime_enabled(dev);

	if (!cur_status) {
		pm_runtime_enable(dev);
		pm_runtime_allow(dev);
	}

	return;
}
void i915_rpm_disable(struct drm_device *drm_dev)
{
	struct device *dev = drm_dev->dev;
	int cur_status = pm_runtime_enabled(dev);

	if (cur_status) {
		pm_runtime_forbid(dev);
		pm_runtime_disable(dev);
	}

	return;
}
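/*
 * Hedged example of how the two helpers above are meant to be paired;
 * the load/unload hook names are illustrative, not the driver's real
 * entry points. Runtime PM is enabled only once the GPU is fully
 * initialized and disabled again before teardown begins.
 */
static void example_gpu_load(struct drm_device *drm_dev)
{
	/* ... hardware bring-up finished ... */
	i915_rpm_enable(drm_dev->dev);
}

static void example_gpu_unload(struct drm_device *drm_dev)
{
	i915_rpm_disable(drm_dev);
	/* ... proceed with teardown ... */
}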
static void hdmi_runtime_put(void)
{
	int r;

	DSSDBG("hdmi_runtime_put\n");

	if (!pm_runtime_enabled(&hdmi.pdev->dev))
		return;

	r = pm_runtime_put_sync(&hdmi.pdev->dev);
	WARN_ON(r < 0);
}
static int sdhci_pm_suspend(struct device *dev) { int retval = 0; struct platform_device *pdev = container_of(dev, struct platform_device, dev); struct sdhci_host *host = platform_get_drvdata(pdev); printk("%s: %s enter\n", mmc_hostname(host->mmc), __func__); if(is_wifi_slot(host)) { printk("[WLAN] suspend read REG_AP_CLK_AP_AHB_CFG is %x\n",__raw_readl(REG_AP_CLK_AP_AHB_CFG)); dhd_mmc_suspend = 1; printk("[WLAN] %s,dhd_mmc_suspend=%d\n",__func__,dhd_mmc_suspend); } #ifdef CONFIG_PM_RUNTIME if(pm_runtime_enabled(dev)) retval = pm_runtime_get_sync(dev); #endif if(retval >= 0) { retval = sdhci_suspend_host(host, PMSG_SUSPEND); if(!retval) { unsigned long flags; #ifdef CONFIG_MMC_HOST_WAKEUP_SUPPORTED if (pdev->id == 1) sdhci_host_wakeup_set(host); #endif spin_lock_irqsave(&host->lock, flags); if(host->ops->set_clock) host->ops->set_clock(host, 0); spin_unlock_irqrestore(&host->lock, flags); } else { #ifdef CONFIG_PM_RUNTIME if(pm_runtime_enabled(dev)) pm_runtime_put_autosuspend(dev); #endif } } printk("%s: %s leave retval %d\n", mmc_hostname(host->mmc), __func__, retval); return retval; }
static int hdmi_runtime_get(void)
{
	int r;

	DSSDBG("hdmi_runtime_get\n");

	if (!pm_runtime_enabled(&hdmi.pdev->dev))
		return 0;

	r = pm_runtime_get_sync(&hdmi.pdev->dev);
	WARN_ON(r < 0);
	if (r < 0)
		return r;

	return 0;
}
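/*
 * Usage sketch for the hdmi_runtime_get()/hdmi_runtime_put() pair above:
 * register accesses are bracketed by the pair so they only happen while
 * the HDMI block is guaranteed to be powered. hdmi_dump_regs() is a
 * hypothetical caller.
 */
static void hdmi_dump_regs(void)
{
	if (hdmi_runtime_get())
		return;

	/* safe to touch HDMI core registers here */

	hdmi_runtime_put();
}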
static int sirf_usp_pcm_probe(struct platform_device *pdev) { int ret; struct sirf_usp *usp; void __iomem *base; struct resource *mem_res; usp = devm_kzalloc(&pdev->dev, sizeof(struct sirf_usp), GFP_KERNEL); if (!usp) return -ENOMEM; platform_set_drvdata(pdev, usp); mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap(&pdev->dev, mem_res->start, resource_size(mem_res)); if (base == NULL) return -ENOMEM; usp->regmap = devm_regmap_init_mmio(&pdev->dev, base, &sirf_usp_regmap_config); if (IS_ERR(usp->regmap)) return PTR_ERR(usp->regmap); usp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(usp->clk)) { dev_err(&pdev->dev, "Get clock failed.\n"); return PTR_ERR(usp->clk); } pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = sirf_usp_pcm_runtime_resume(&pdev->dev); if (ret) return ret; } ret = devm_snd_soc_register_component(&pdev->dev, &sirf_usp_component, &sirf_usp_pcm_dai, 1); if (ret) { dev_err(&pdev->dev, "Register Audio SoC dai failed.\n"); return ret; } return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); }
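/*
 * Minimal probe/remove skeleton distilled from the pattern shared by
 * several of the snippets here (sirf_usp_pcm, rockchip_i2s, tegra30_i2s,
 * sun8i_codec, ...): enable runtime PM and, if the framework is disabled
 * (e.g. !CONFIG_PM), bring the hardware up by calling the runtime-resume
 * handler directly, mirroring that choice on the error and remove paths.
 * foo_runtime_resume()/foo_runtime_suspend() are hypothetical stand-ins.
 */
static int foo_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = foo_runtime_resume(&pdev->dev);
		if (ret)
			goto err_pm_disable;
	}

	/* ... register DAIs/components here ... */

	return 0;

err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int foo_remove(struct platform_device *pdev)
{
	if (!pm_runtime_enabled(&pdev->dev))
		foo_runtime_suspend(&pdev->dev);
	else
		pm_runtime_disable(&pdev->dev);

	return 0;
}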
static int wl1271_sdio_power_off(struct wl1271 *wl)
{
	struct sdio_func *func = wl_to_func(wl);
	int ret;

	sdio_disable_func(func);
	sdio_release_host(func);

	/* Power off the card manually, even if runtime PM is enabled. */
	ret = mmc_power_save_host(func->card->host);
	if (ret < 0)
		return ret;

	/* If enabled, let runtime PM know the card is powered off */
	if (pm_runtime_enabled(&func->dev))
		ret = pm_runtime_put_sync(&func->dev);

	return ret;
}
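/*
 * Hedged sketch of a small dispatcher gluing wl1271_sdio_power_on() and
 * wl1271_sdio_power_off() together; the wrapper name and its use as an
 * interface callback are illustrative, not copied from the driver.
 */
static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
{
	if (enable)
		return wl1271_sdio_power_on(wl);

	return wl1271_sdio_power_off(wl);
}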
static int sdhci_pm_resume(struct device *dev) { int retval = 0; unsigned long flags; struct platform_device *pdev = container_of(dev, struct platform_device, dev); struct sdhci_host *host = platform_get_drvdata(pdev); printk("%s: %s enter\n", mmc_hostname(host->mmc), __func__); if(is_wifi_slot(host) || is_cbp_slot(host) || is_sd_slot(host)) { sdhci_sprd_set_base_clock(host); } spin_lock_irqsave(&host->lock, flags); if(host->ops->set_clock) host->ops->set_clock(host, 1); spin_unlock_irqrestore(&host->lock, flags); if(is_wifi_slot(host)) { printk("[WLAN] resume read REG_AP_CLK_AP_AHB_CFG is %x\n",__raw_readl(REG_AP_CLK_AP_AHB_CFG)); printk("[WLAN] resume read REG_AP_CLK_SDIO1_CFG is %x\n",__raw_readl(REG_AP_CLK_SDIO1_CFG)); } #ifdef CONFIG_MMC_HOST_WAKEUP_SUPPORTED if (pdev->id == 1) sdhci_host_wakeup_clear(host); #endif retval = sdhci_resume_host(host); #ifdef CONFIG_PM_RUNTIME if(pm_runtime_enabled(dev)) pm_runtime_put_autosuspend(dev); #endif if(is_wifi_slot(host)) { dhd_mmc_suspend = 0; printk("[WLAN] %s,dhd_mmc_suspend=%d\n",__func__,dhd_mmc_suspend); } printk("%s: %s leave retval %d\n", mmc_hostname(host->mmc), __func__, retval); return retval; }
static void kbase_rk_power_off_callback(struct kbase_device *kbdev)
{
	struct kbase_rk *kbase_rk = kbdev->platform_context;

	if (!kbase_rk->is_powered) {
		dev_warn(kbdev->dev, "called %s for powered off device\n",
			 __func__);
		return;
	}

	dev_dbg(kbdev->dev, "%s: powering off\n", __func__);

	kbase_rk->is_powered = false;
	KBASE_TIMELINE_GPU_POWER(kbdev, 0);

	clk_disable(kbase_rk->clk);

	if (pm_runtime_enabled(kbdev->dev)) {
		pm_runtime_mark_last_busy(kbdev->dev);
		pm_runtime_put_autosuspend(kbdev->dev);
	} else {
		kbase_rk_rt_power_off_callback(kbdev);
	}
}
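/*
 * Sketch of the runtime-PM setup that the autosuspend put in
 * kbase_rk_power_off_callback() presumes: an autosuspend delay is
 * configured and autosuspend enabled once, typically at init time.
 * The helper name and the 200 ms delay are assumptions for
 * illustration only.
 */
static void kbase_rk_runtime_pm_init(struct kbase_device *kbdev)
{
	pm_runtime_set_autosuspend_delay(kbdev->dev, 200);
	pm_runtime_use_autosuspend(kbdev->dev);
	pm_runtime_set_active(kbdev->dev);
	pm_runtime_enable(kbdev->dev);
}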
static int rockchip_i2s_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct rk30_i2s_info *i2s; struct resource *mem, *memregion; u32 regs_base; int ret; I2S_DBG("%s()\n", __FUNCTION__); ret = of_property_read_u32(node, "i2s-id", &pdev->id); if (ret < 0) { dev_err(&pdev->dev, "%s() Can not read property: id\n", __FUNCTION__); ret = -ENOMEM; goto err; } if(pdev->id >= MAX_I2S) { dev_err(&pdev->dev, "id %d out of range\n", pdev->id); ret = -ENOMEM; goto err; } i2s = devm_kzalloc(&pdev->dev, sizeof(struct rk30_i2s_info), GFP_KERNEL); if (!i2s) { dev_err(&pdev->dev, "Can't allocate i2s info\n"); ret = -ENOMEM; goto err; } rk30_i2s = i2s; i2s->i2s_hclk = clk_get(&pdev->dev, "i2s_hclk"); if(IS_ERR(i2s->i2s_hclk) ) { dev_err(&pdev->dev, "get i2s_hclk failed.\n"); } else{ clk_prepare_enable(i2s->i2s_hclk); } i2s->i2s_clk= clk_get(&pdev->dev, "i2s_clk"); if (IS_ERR(i2s->i2s_clk)) { dev_err(&pdev->dev, "Can't retrieve i2s clock\n"); ret = PTR_ERR(i2s->i2s_clk); goto err; } #ifdef CLK_SET_lATER INIT_DELAYED_WORK(&i2s->clk_delayed_work, set_clk_later_work); schedule_delayed_work(&i2s->clk_delayed_work, msecs_to_jiffies(10)); #else clk_set_rate(i2s->iis_clk, 11289600); #endif clk_prepare_enable(i2s->i2s_clk); i2s->i2s_mclk= clk_get(&pdev->dev, "i2s_mclk"); if(IS_ERR(i2s->i2s_mclk) ) { printk("This platfrom have not i2s_mclk,no need to set i2s_mclk.\n"); }else{ #ifdef CLK_SET_lATER #else clk_set_rate(i2s->i2s_mclk, 11289600); #endif clk_prepare_enable(i2s->i2s_mclk); } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "No memory resource\n"); ret = -ENODEV; goto err_clk_put; } memregion = devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), "rockchip-i2s"); if (!memregion) { dev_err(&pdev->dev, "Memory region already claimed\n"); ret = -EBUSY; goto err_clk_put; } i2s->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); if (!i2s->regs) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto err_clk_put; } regs_base = mem->start; i2s->playback_dma_data.addr = regs_base + I2S_TXR_BUFF; i2s->playback_dma_data.addr_width = 4; i2s->playback_dma_data.maxburst = 1; i2s->capture_dma_data.addr = regs_base + I2S_RXR_BUFF; i2s->capture_dma_data.addr_width = 4; i2s->capture_dma_data.maxburst = 1; i2s->i2s_tx_status = false; i2s->i2s_rx_status = false; pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = rockchip_i2s_resume_noirq(&pdev->dev); if (ret) goto err_pm_disable; } //set dev name to driver->name.id for sound card register dev_set_name(&pdev->dev, "%s.%d", pdev->dev.driver->name, pdev->id); ret = snd_soc_register_component(&pdev->dev, &rockchip_i2s_component, &rockchip_i2s_dai[pdev->id], 1); if (ret) { dev_err(&pdev->dev, "Could not register DAI: %d\n", ret); ret = -ENOMEM; goto err_suspend; } ret = rockchip_pcm_platform_register(&pdev->dev); if (ret) { dev_err(&pdev->dev, "Could not register PCM: %d\n", ret); goto err_unregister_component; } /* Mark ourselves as in TXRX mode so we can run through our cleanup * process without warnings. */ rockchip_snd_txctrl(i2s, 0); rockchip_snd_rxctrl(i2s, 0); dev_set_drvdata(&pdev->dev, i2s); return 0; err_unregister_component: snd_soc_unregister_component(&pdev->dev); err_suspend: if (!pm_runtime_status_suspended(&pdev->dev)) rockchip_i2s_suspend_noirq(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); err_clk_put: clk_put(i2s->i2s_clk); err: return ret; }
static int tegra30_ahub_probe(struct platform_device *pdev) { const struct of_device_id *match; const struct tegra30_ahub_soc_data *soc_data; struct clk *clk; int i; struct resource *res0, *res1, *region; u32 of_dma[2]; void __iomem *regs_apbif, *regs_ahub; int ret = 0; if (ahub) return -ENODEV; match = of_match_device(tegra30_ahub_of_match, &pdev->dev); if (!match) return -EINVAL; soc_data = match->data; /* * The AHUB hosts a register bus: the "configlink". For this to * operate correctly, all devices on this bus must be out of reset. * Ensure that here. */ for (i = 0; i < ARRAY_SIZE(configlink_clocks); i++) { if (!(configlink_clocks[i].clk_list_mask & soc_data->clk_list_mask)) continue; clk = clk_get(&pdev->dev, configlink_clocks[i].clk_name); if (IS_ERR(clk)) { dev_err(&pdev->dev, "Can't get clock %s\n", configlink_clocks[i].clk_name); ret = PTR_ERR(clk); goto err; } tegra_periph_reset_deassert(clk); clk_put(clk); } ahub = devm_kzalloc(&pdev->dev, sizeof(struct tegra30_ahub), GFP_KERNEL); if (!ahub) { dev_err(&pdev->dev, "Can't allocate tegra30_ahub\n"); ret = -ENOMEM; goto err; } dev_set_drvdata(&pdev->dev, ahub); ahub->dev = &pdev->dev; ahub->clk_d_audio = clk_get(&pdev->dev, "d_audio"); if (IS_ERR(ahub->clk_d_audio)) { dev_err(&pdev->dev, "Can't retrieve ahub d_audio clock\n"); ret = PTR_ERR(ahub->clk_d_audio); goto err; } ahub->clk_apbif = clk_get(&pdev->dev, "apbif"); if (IS_ERR(ahub->clk_apbif)) { dev_err(&pdev->dev, "Can't retrieve ahub apbif clock\n"); ret = PTR_ERR(ahub->clk_apbif); goto err_clk_put_d_audio; } if (of_property_read_u32_array(pdev->dev.of_node, "nvidia,dma-request-selector", of_dma, 2) < 0) { dev_err(&pdev->dev, "Missing property nvidia,dma-request-selector\n"); ret = -ENODEV; goto err_clk_put_d_audio; } ahub->dma_sel = of_dma[1]; res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res0) { dev_err(&pdev->dev, "No apbif memory resource\n"); ret = -ENODEV; goto err_clk_put_apbif; } region = devm_request_mem_region(&pdev->dev, res0->start, resource_size(res0), DRV_NAME); if (!region) { dev_err(&pdev->dev, "request region apbif failed\n"); ret = -EBUSY; goto err_clk_put_apbif; } ahub->apbif_addr = res0->start; regs_apbif = devm_ioremap(&pdev->dev, res0->start, resource_size(res0)); if (!regs_apbif) { dev_err(&pdev->dev, "ioremap apbif failed\n"); ret = -ENOMEM; goto err_clk_put_apbif; } ahub->regmap_apbif = devm_regmap_init_mmio(&pdev->dev, regs_apbif, &tegra30_ahub_apbif_regmap_config); if (IS_ERR(ahub->regmap_apbif)) { dev_err(&pdev->dev, "apbif regmap init failed\n"); ret = PTR_ERR(ahub->regmap_apbif); goto err_clk_put_apbif; } regcache_cache_only(ahub->regmap_apbif, true); res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res1) { dev_err(&pdev->dev, "No ahub memory resource\n"); ret = -ENODEV; goto err_clk_put_apbif; } region = devm_request_mem_region(&pdev->dev, res1->start, resource_size(res1), DRV_NAME); if (!region) { dev_err(&pdev->dev, "request region ahub failed\n"); ret = -EBUSY; goto err_clk_put_apbif; } regs_ahub = devm_ioremap(&pdev->dev, res1->start, resource_size(res1)); if (!regs_ahub) { dev_err(&pdev->dev, "ioremap ahub failed\n"); ret = -ENOMEM; goto err_clk_put_apbif; } ahub->regmap_ahub = devm_regmap_init_mmio(&pdev->dev, regs_ahub, &tegra30_ahub_ahub_regmap_config); if (IS_ERR(ahub->regmap_ahub)) { dev_err(&pdev->dev, "ahub regmap init failed\n"); ret = PTR_ERR(ahub->regmap_ahub); goto err_clk_put_apbif; } regcache_cache_only(ahub->regmap_ahub, true); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = 
tegra30_ahub_runtime_resume(&pdev->dev); if (ret) goto err_pm_disable; } of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); return 0; err_pm_disable: pm_runtime_disable(&pdev->dev); err_clk_put_apbif: clk_put(ahub->clk_apbif); err_clk_put_d_audio: clk_put(ahub->clk_d_audio); ahub = NULL; err: return ret; }
static __devinit int tegra20_i2s_platform_probe(struct platform_device *pdev) { struct tegra20_i2s *i2s; struct resource *mem, *memregion, *dmareq; u32 of_dma[2]; u32 dma_ch; void __iomem *regs; int ret; i2s = devm_kzalloc(&pdev->dev, sizeof(struct tegra20_i2s), GFP_KERNEL); if (!i2s) { dev_err(&pdev->dev, "Can't allocate tegra20_i2s\n"); ret = -ENOMEM; goto err; } dev_set_drvdata(&pdev->dev, i2s); i2s->dai = tegra20_i2s_dai_template; i2s->dai.name = dev_name(&pdev->dev); i2s->clk_i2s = clk_get(&pdev->dev, NULL); if (IS_ERR(i2s->clk_i2s)) { dev_err(&pdev->dev, "Can't retrieve i2s clock\n"); ret = PTR_ERR(i2s->clk_i2s); goto err; } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "No memory resource\n"); ret = -ENODEV; goto err_clk_put; } dmareq = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!dmareq) { if (of_property_read_u32_array(pdev->dev.of_node, "nvidia,dma-request-selector", of_dma, 2) < 0) { dev_err(&pdev->dev, "No DMA resource\n"); ret = -ENODEV; goto err_clk_put; } dma_ch = of_dma[1]; } else { dma_ch = dmareq->start; } memregion = devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), DRV_NAME); if (!memregion) { dev_err(&pdev->dev, "Memory region already claimed\n"); ret = -EBUSY; goto err_clk_put; } regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); if (!regs) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto err_clk_put; } i2s->regmap = devm_regmap_init_mmio(&pdev->dev, regs, &tegra20_i2s_regmap_config); if (IS_ERR(i2s->regmap)) { dev_err(&pdev->dev, "regmap init failed\n"); ret = PTR_ERR(i2s->regmap); goto err_clk_put; } i2s->capture_dma_data.addr = mem->start + TEGRA20_I2S_FIFO2; i2s->capture_dma_data.wrap = 4; i2s->capture_dma_data.width = 32; i2s->capture_dma_data.req_sel = dma_ch; i2s->playback_dma_data.addr = mem->start + TEGRA20_I2S_FIFO1; i2s->playback_dma_data.wrap = 4; i2s->playback_dma_data.width = 32; i2s->playback_dma_data.req_sel = dma_ch; i2s->reg_ctrl = TEGRA20_I2S_CTRL_FIFO_FORMAT_PACKED; pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = tegra20_i2s_runtime_resume(&pdev->dev); if (ret) goto err_pm_disable; } ret = snd_soc_register_dai(&pdev->dev, &i2s->dai); if (ret) { dev_err(&pdev->dev, "Could not register DAI: %d\n", ret); ret = -ENOMEM; goto err_suspend; } ret = tegra_pcm_platform_register(&pdev->dev); if (ret) { dev_err(&pdev->dev, "Could not register PCM: %d\n", ret); goto err_unregister_dai; } return 0; err_unregister_dai: snd_soc_unregister_dai(&pdev->dev); err_suspend: if (!pm_runtime_status_suspended(&pdev->dev)) tegra20_i2s_runtime_suspend(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); err_clk_put: clk_put(i2s->clk_i2s); err: return ret; }
static int mt6797_afe_pcm_dev_probe(struct platform_device *pdev) { struct mtk_base_afe *afe; struct mt6797_afe_private *afe_priv; struct resource *res; struct device *dev; int i, irq_id, ret; afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL); if (!afe) return -ENOMEM; afe->platform_priv = devm_kzalloc(&pdev->dev, sizeof(*afe_priv), GFP_KERNEL); if (!afe->platform_priv) return -ENOMEM; afe_priv = afe->platform_priv; afe->dev = &pdev->dev; dev = afe->dev; /* initial audio related clock */ ret = mt6797_init_clock(afe); if (ret) { dev_err(dev, "init clock error\n"); return ret; } /* regmap init */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); afe->base_addr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(afe->base_addr)) return PTR_ERR(afe->base_addr); afe->regmap = devm_regmap_init_mmio(&pdev->dev, afe->base_addr, &mt6797_afe_regmap_config); if (IS_ERR(afe->regmap)) return PTR_ERR(afe->regmap); /* init memif */ afe->memif_size = MT6797_MEMIF_NUM; afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif), GFP_KERNEL); if (!afe->memif) return -ENOMEM; for (i = 0; i < afe->memif_size; i++) { afe->memif[i].data = &memif_data[i]; afe->memif[i].irq_usage = -1; } mutex_init(&afe->irq_alloc_lock); /* irq initialize */ afe->irqs_size = MT6797_IRQ_NUM; afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs), GFP_KERNEL); if (!afe->irqs) return -ENOMEM; for (i = 0; i < afe->irqs_size; i++) afe->irqs[i].irq_data = &irq_data[i]; /* request irq */ irq_id = platform_get_irq(pdev, 0); if (!irq_id) { dev_err(dev, "%s no irq found\n", dev->of_node->name); return -ENXIO; } ret = devm_request_irq(dev, irq_id, mt6797_afe_irq_handler, IRQF_TRIGGER_NONE, "asys-isr", (void *)afe); if (ret) { dev_err(dev, "could not request_irq for asys-isr\n"); return ret; } /* init sub_dais */ INIT_LIST_HEAD(&afe->sub_dais); for (i = 0; i < ARRAY_SIZE(dai_register_cbs); i++) { ret = dai_register_cbs[i](afe); if (ret) { dev_warn(afe->dev, "dai register i %d fail, ret %d\n", i, ret); return ret; } } /* init dai_driver and component_driver */ ret = mtk_afe_combine_sub_dai(afe); if (ret) { dev_warn(afe->dev, "mtk_afe_combine_sub_dai fail, ret %d\n", ret); return ret; } afe->mtk_afe_hardware = &mt6797_afe_hardware; afe->memif_fs = mt6797_memif_fs; afe->irq_fs = mt6797_irq_fs; afe->runtime_resume = mt6797_afe_runtime_resume; afe->runtime_suspend = mt6797_afe_runtime_suspend; platform_set_drvdata(pdev, afe); pm_runtime_enable(dev); if (!pm_runtime_enabled(dev)) goto err_pm_disable; pm_runtime_get_sync(&pdev->dev); /* register component */ ret = devm_snd_soc_register_component(dev, &mt6797_afe_component, NULL, 0); if (ret) { dev_warn(dev, "err_platform\n"); goto err_pm_disable; } ret = devm_snd_soc_register_component(afe->dev, &mt6797_afe_pcm_dai_component, afe->dai_drivers, afe->num_dai_drivers); if (ret) { dev_warn(dev, "err_dai_component\n"); goto err_pm_disable; } return 0; err_pm_disable: pm_runtime_disable(dev); return ret; }
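/*
 * Assumed (not copied) dev_pm_ops wiring that the probe above relies on,
 * so that its pm_runtime_get_sync() call ends up invoking
 * mt6797_afe_runtime_resume(); shown only to make the control flow
 * explicit.
 */
static const struct dev_pm_ops mt6797_afe_pm_ops = {
	SET_RUNTIME_PM_OPS(mt6797_afe_runtime_suspend,
			   mt6797_afe_runtime_resume, NULL)
};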
static int fimc_lite_probe(struct platform_device *pdev) { struct flite_drvdata *drv_data = NULL; struct device *dev = &pdev->dev; const struct of_device_id *of_id; struct fimc_lite *fimc; struct resource *res; int ret; if (!dev->of_node) return -ENODEV; fimc = devm_kzalloc(dev, sizeof(*fimc), GFP_KERNEL); if (!fimc) return -ENOMEM; of_id = of_match_node(flite_of_match, dev->of_node); if (of_id) drv_data = (struct flite_drvdata *)of_id->data; fimc->index = of_alias_get_id(dev->of_node, "fimc-lite"); if (!drv_data || fimc->index >= drv_data->num_instances || fimc->index < 0) { dev_err(dev, "Wrong %s node alias\n", dev->of_node->full_name); return -EINVAL; } fimc->dd = drv_data; fimc->pdev = pdev; init_waitqueue_head(&fimc->irq_queue); spin_lock_init(&fimc->slock); mutex_init(&fimc->lock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); fimc->regs = devm_ioremap_resource(dev, res); if (IS_ERR(fimc->regs)) return PTR_ERR(fimc->regs); res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (res == NULL) { dev_err(dev, "Failed to get IRQ resource\n"); return -ENXIO; } ret = fimc_lite_clk_get(fimc); if (ret) return ret; ret = devm_request_irq(dev, res->start, flite_irq_handler, 0, dev_name(dev), fimc); if (ret) { dev_err(dev, "Failed to install irq (%d)\n", ret); goto err_clk_put; } /* The video node will be created within the subdev's registered() op */ ret = fimc_lite_create_capture_subdev(fimc); if (ret) goto err_clk_put; platform_set_drvdata(pdev, fimc); pm_runtime_enable(dev); if (!pm_runtime_enabled(dev)) { ret = clk_enable(fimc->clock); if (ret < 0) goto err_sd; } fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev); if (IS_ERR(fimc->alloc_ctx)) { ret = PTR_ERR(fimc->alloc_ctx); goto err_clk_dis; } fimc_lite_set_default_config(fimc); dev_dbg(dev, "FIMC-LITE.%d registered successfully\n", fimc->index); return 0; err_clk_dis: if (!pm_runtime_enabled(dev)) clk_disable(fimc->clock); err_sd: fimc_lite_unregister_capture_subdev(fimc); err_clk_put: fimc_lite_clk_put(fimc); return ret; }
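/*
 * Illustrative runtime-PM handlers matching the manual clk_enable()/
 * clk_disable() fallback used in fimc_lite_probe() when runtime PM is
 * disabled; the real driver's handlers likely do more (state flags,
 * gated clocks), so treat this as a sketch under that assumption.
 */
static int fimc_lite_runtime_resume(struct device *dev)
{
	struct fimc_lite *fimc = dev_get_drvdata(dev);

	return clk_enable(fimc->clock);
}

static int fimc_lite_runtime_suspend(struct device *dev)
{
	struct fimc_lite *fimc = dev_get_drvdata(dev);

	clk_disable(fimc->clock);
	return 0;
}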
static int tegra30_i2s_platform_probe(struct platform_device *pdev) { struct tegra30_i2s *i2s; const struct of_device_id *match; u32 cif_ids[2]; struct resource *mem, *memregion; void __iomem *regs; int ret; i2s = devm_kzalloc(&pdev->dev, sizeof(struct tegra30_i2s), GFP_KERNEL); if (!i2s) { dev_err(&pdev->dev, "Can't allocate tegra30_i2s\n"); ret = -ENOMEM; goto err; } dev_set_drvdata(&pdev->dev, i2s); match = of_match_device(tegra30_i2s_of_match, &pdev->dev); if (!match) { dev_err(&pdev->dev, "Error: No device match found\n"); ret = -ENODEV; goto err; } i2s->soc_data = (struct tegra30_i2s_soc_data *)match->data; i2s->dai = tegra30_i2s_dai_template; i2s->dai.name = dev_name(&pdev->dev); ret = of_property_read_u32_array(pdev->dev.of_node, "nvidia,ahub-cif-ids", cif_ids, ARRAY_SIZE(cif_ids)); if (ret < 0) goto err; i2s->playback_i2s_cif = cif_ids[0]; i2s->capture_i2s_cif = cif_ids[1]; i2s->clk_i2s = clk_get(&pdev->dev, NULL); if (IS_ERR(i2s->clk_i2s)) { dev_err(&pdev->dev, "Can't retrieve i2s clock\n"); ret = PTR_ERR(i2s->clk_i2s); goto err; } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "No memory resource\n"); ret = -ENODEV; goto err_clk_put; } memregion = devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), DRV_NAME); if (!memregion) { dev_err(&pdev->dev, "Memory region already claimed\n"); ret = -EBUSY; goto err_clk_put; } regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); if (!regs) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto err_clk_put; } i2s->regmap = devm_regmap_init_mmio(&pdev->dev, regs, &tegra30_i2s_regmap_config); if (IS_ERR(i2s->regmap)) { dev_err(&pdev->dev, "regmap init failed\n"); ret = PTR_ERR(i2s->regmap); goto err_clk_put; } regcache_cache_only(i2s->regmap, true); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = tegra30_i2s_runtime_resume(&pdev->dev); if (ret) goto err_pm_disable; } i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; i2s->playback_dma_data.maxburst = 4; ret = tegra30_ahub_allocate_tx_fifo(&i2s->playback_fifo_cif, i2s->playback_dma_chan, sizeof(i2s->playback_dma_chan), &i2s->playback_dma_data.addr); if (ret) { dev_err(&pdev->dev, "Could not alloc TX FIFO: %d\n", ret); goto err_suspend; } ret = tegra30_ahub_set_rx_cif_source(i2s->playback_i2s_cif, i2s->playback_fifo_cif); if (ret) { dev_err(&pdev->dev, "Could not route TX FIFO: %d\n", ret); goto err_free_tx_fifo; } i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; i2s->capture_dma_data.maxburst = 4; ret = tegra30_ahub_allocate_rx_fifo(&i2s->capture_fifo_cif, i2s->capture_dma_chan, sizeof(i2s->capture_dma_chan), &i2s->capture_dma_data.addr); if (ret) { dev_err(&pdev->dev, "Could not alloc RX FIFO: %d\n", ret); goto err_unroute_tx_fifo; } ret = tegra30_ahub_set_rx_cif_source(i2s->capture_fifo_cif, i2s->capture_i2s_cif); if (ret) { dev_err(&pdev->dev, "Could not route TX FIFO: %d\n", ret); goto err_free_rx_fifo; } ret = snd_soc_register_component(&pdev->dev, &tegra30_i2s_component, &i2s->dai, 1); if (ret) { dev_err(&pdev->dev, "Could not register DAI: %d\n", ret); ret = -ENOMEM; goto err_unroute_rx_fifo; } ret = tegra_pcm_platform_register_with_chan_names(&pdev->dev, &i2s->dma_config, i2s->playback_dma_chan, i2s->capture_dma_chan); if (ret) { dev_err(&pdev->dev, "Could not register PCM: %d\n", ret); goto err_unregister_component; } return 0; err_unregister_component: snd_soc_unregister_component(&pdev->dev); err_unroute_rx_fifo: 
tegra30_ahub_unset_rx_cif_source(i2s->capture_fifo_cif); err_free_rx_fifo: tegra30_ahub_free_rx_fifo(i2s->capture_fifo_cif); err_unroute_tx_fifo: tegra30_ahub_unset_rx_cif_source(i2s->playback_i2s_cif); err_free_tx_fifo: tegra30_ahub_free_tx_fifo(i2s->playback_fifo_cif); err_suspend: if (!pm_runtime_status_suspended(&pdev->dev)) tegra30_i2s_runtime_suspend(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); err_clk_put: clk_put(i2s->clk_i2s); err: return ret; }
static int sun8i_codec_probe(struct platform_device *pdev) { struct resource *res_base; struct sun8i_codec *scodec; void __iomem *base; int ret; scodec = devm_kzalloc(&pdev->dev, sizeof(*scodec), GFP_KERNEL); if (!scodec) return -ENOMEM; scodec->dev = &pdev->dev; scodec->clk_module = devm_clk_get(&pdev->dev, "mod"); if (IS_ERR(scodec->clk_module)) { dev_err(&pdev->dev, "Failed to get the module clock\n"); return PTR_ERR(scodec->clk_module); } scodec->clk_bus = devm_clk_get(&pdev->dev, "bus"); if (IS_ERR(scodec->clk_bus)) { dev_err(&pdev->dev, "Failed to get the bus clock\n"); return PTR_ERR(scodec->clk_bus); } res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res_base); if (IS_ERR(base)) { dev_err(&pdev->dev, "Failed to map the registers\n"); return PTR_ERR(base); } scodec->regmap = devm_regmap_init_mmio(&pdev->dev, base, &sun8i_codec_regmap_config); if (IS_ERR(scodec->regmap)) { dev_err(&pdev->dev, "Failed to create our regmap\n"); return PTR_ERR(scodec->regmap); } platform_set_drvdata(pdev, scodec); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = sun8i_codec_runtime_resume(&pdev->dev); if (ret) goto err_pm_disable; } ret = snd_soc_register_codec(&pdev->dev, &sun8i_soc_codec, &sun8i_codec_dai, 1); if (ret) { dev_err(&pdev->dev, "Failed to register codec\n"); goto err_suspend; } return ret; err_suspend: if (!pm_runtime_status_suspended(&pdev->dev)) sun8i_codec_runtime_suspend(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); return ret; }
static int rockchip_i2s_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct rk_i2s_dev *i2s; struct snd_soc_dai_driver *soc_dai; struct resource *res; void __iomem *regs; int ret; int val; i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL); if (!i2s) { dev_err(&pdev->dev, "Can't allocate rk_i2s_dev\n"); return -ENOMEM; } /* try to prepare related clocks */ i2s->hclk = devm_clk_get(&pdev->dev, "i2s_hclk"); if (IS_ERR(i2s->hclk)) { dev_err(&pdev->dev, "Can't retrieve i2s bus clock\n"); return PTR_ERR(i2s->hclk); } ret = clk_prepare_enable(i2s->hclk); if (ret) { dev_err(i2s->dev, "hclock enable failed %d\n", ret); return ret; } i2s->mclk = devm_clk_get(&pdev->dev, "i2s_clk"); if (IS_ERR(i2s->mclk)) { dev_err(&pdev->dev, "Can't retrieve i2s master clock\n"); return PTR_ERR(i2s->mclk); } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(regs)) return PTR_ERR(regs); i2s->regmap = devm_regmap_init_mmio(&pdev->dev, regs, &rockchip_i2s_regmap_config); if (IS_ERR(i2s->regmap)) { dev_err(&pdev->dev, "Failed to initialise managed register map\n"); return PTR_ERR(i2s->regmap); } i2s->playback_dma_data.addr = res->start + I2S_TXDR; i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; i2s->playback_dma_data.maxburst = 4; i2s->capture_dma_data.addr = res->start + I2S_RXDR; i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; i2s->capture_dma_data.maxburst = 4; i2s->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, i2s); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = i2s_runtime_resume(&pdev->dev); if (ret) goto err_pm_disable; } soc_dai = devm_kzalloc(&pdev->dev, sizeof(*soc_dai), GFP_KERNEL); if (!soc_dai) return -ENOMEM; memcpy(soc_dai, &rockchip_i2s_dai, sizeof(*soc_dai)); if (!of_property_read_u32(node, "rockchip,playback-channels", &val)) { if (val >= 2 && val <= 8) soc_dai->playback.channels_max = val; } if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) { if (val >= 2 && val <= 8) soc_dai->capture.channels_max = val; } ret = devm_snd_soc_register_component(&pdev->dev, &rockchip_i2s_component, soc_dai, 1); if (ret) { dev_err(&pdev->dev, "Could not register DAI\n"); goto err_suspend; } ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); if (ret) { dev_err(&pdev->dev, "Could not register PCM\n"); return ret; } return 0; err_suspend: if (!pm_runtime_status_suspended(&pdev->dev)) i2s_runtime_suspend(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); return ret; }
static int mdss_mdp_probe(struct platform_device *pdev) { struct resource *res; int rc; struct mdss_data_type *mdata; if (!pdev->dev.of_node) { pr_err("MDP driver only supports device tree probe\n"); return -ENOTSUPP; } if (mdss_res) { pr_err("MDP already initialized\n"); return -EINVAL; } mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL); if (mdata == NULL) return -ENOMEM; pdev->id = 0; mdata->pdev = pdev; platform_set_drvdata(pdev, mdata); mdss_res = mdata; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdp_phys"); if (!res) { pr_err("unable to get MDP base address\n"); rc = -ENOMEM; goto probe_done; } mdata->mdp_reg_size = resource_size(res); mdata->mdp_base = devm_ioremap(&pdev->dev, res->start, mdata->mdp_reg_size); if (unlikely(!mdata->mdp_base)) { pr_err("unable to map MDP base\n"); rc = -ENOMEM; goto probe_done; } pr_info("MDP HW Base phy_Address=0x%x virt=0x%x\n", (int) res->start, (int) mdata->mdp_base); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vbif_phys"); if (!res) { pr_err("unable to get MDSS VBIF base address\n"); rc = -ENOMEM; goto probe_done; } mdata->vbif_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (unlikely(!mdata->vbif_base)) { pr_err("unable to map MDSS VBIF base\n"); rc = -ENOMEM; goto probe_done; } pr_info("MDSS VBIF HW Base phy_Address=0x%x virt=0x%x\n", (int) res->start, (int) mdata->vbif_base); res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { pr_err("unable to get MDSS irq\n"); rc = -ENOMEM; goto probe_done; } mdata->irq = res->start; mdss_mdp_hw.ptr = mdata; /*populate hw iomem base info from device tree*/ rc = mdss_mdp_parse_dt(pdev); if (rc) { pr_err("unable to parse device tree\n"); goto probe_done; } rc = mdss_mdp_res_init(mdata); if (rc) { pr_err("unable to initialize mdss mdp resources\n"); goto probe_done; } rc = mdss_mdp_pp_init(&pdev->dev); if (rc) { pr_err("unable to initialize mdss pp resources\n"); goto probe_done; } rc = mdss_mdp_bus_scale_register(mdata); if (rc) { pr_err("unable to register bus scaling\n"); goto probe_done; } mdss_mdp_bus_scale_set_quota(AB_QUOTA, IB_QUOTA); rc = mdss_mdp_debug_init(mdata); if (rc) { pr_err("unable to initialize mdp debugging\n"); goto probe_done; } pm_runtime_set_suspended(&pdev->dev); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) mdss_mdp_footswitch_ctrl(mdata, true); rc = mdss_mdp_register_sysfs(mdata); if (rc) pr_err("unable to register mdp sysfs nodes\n"); rc = mdss_fb_register_mdp_instance(&mdp5); if (rc) pr_err("unable to register mdp instance\n"); rc = mdss_register_irq(&mdss_mdp_hw); if (rc) pr_err("mdss_register_irq failed.\n"); probe_done: if (IS_ERR_VALUE(rc)) { mdss_mdp_hw.ptr = NULL; mdss_res = NULL; mdss_mdp_pp_term(&pdev->dev); } return rc; }
static int tegra30_i2s_platform_probe(struct platform_device *pdev) { struct tegra30_i2s *i2s; u32 cif_ids[2]; struct resource *mem, *memregion; void __iomem *regs; int ret; i2s = devm_kzalloc(&pdev->dev, sizeof(struct tegra30_i2s), GFP_KERNEL); if (!i2s) { dev_err(&pdev->dev, "Can't allocate tegra30_i2s\n"); ret = -ENOMEM; goto err; } dev_set_drvdata(&pdev->dev, i2s); i2s->dai = tegra30_i2s_dai_template; i2s->dai.name = dev_name(&pdev->dev); ret = of_property_read_u32_array(pdev->dev.of_node, "nvidia,ahub-cif-ids", cif_ids, ARRAY_SIZE(cif_ids)); if (ret < 0) goto err; i2s->playback_i2s_cif = cif_ids[0]; i2s->capture_i2s_cif = cif_ids[1]; i2s->clk_i2s = clk_get(&pdev->dev, NULL); if (IS_ERR(i2s->clk_i2s)) { dev_err(&pdev->dev, "Can't retrieve i2s clock\n"); ret = PTR_ERR(i2s->clk_i2s); goto err; } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "No memory resource\n"); ret = -ENODEV; goto err_clk_put; } memregion = devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), DRV_NAME); if (!memregion) { dev_err(&pdev->dev, "Memory region already claimed\n"); ret = -EBUSY; goto err_clk_put; } regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); if (!regs) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto err_clk_put; } i2s->regmap = devm_regmap_init_mmio(&pdev->dev, regs, &tegra30_i2s_regmap_config); if (IS_ERR(i2s->regmap)) { dev_err(&pdev->dev, "regmap init failed\n"); ret = PTR_ERR(i2s->regmap); goto err_clk_put; } regcache_cache_only(i2s->regmap, true); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = tegra30_i2s_runtime_resume(&pdev->dev); if (ret) goto err_pm_disable; } ret = snd_soc_register_dai(&pdev->dev, &i2s->dai); if (ret) { dev_err(&pdev->dev, "Could not register DAI: %d\n", ret); ret = -ENOMEM; goto err_suspend; } ret = tegra_pcm_platform_register(&pdev->dev); if (ret) { dev_err(&pdev->dev, "Could not register PCM: %d\n", ret); goto err_unregister_dai; } return 0; err_unregister_dai: snd_soc_unregister_dai(&pdev->dev); err_suspend: if (!pm_runtime_status_suspended(&pdev->dev)) tegra30_i2s_runtime_suspend(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); err_clk_put: clk_put(i2s->clk_i2s); err: return ret; }
static int rockchip_i2s_probe(struct platform_device *pdev) { struct rk_i2s_dev *i2s; struct resource *res; void __iomem *regs; int ret; i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL); if (!i2s) { dev_err(&pdev->dev, "Can't allocate rk_i2s_dev\n"); return -ENOMEM; } /* try to prepare related clocks */ i2s->hclk = devm_clk_get(&pdev->dev, "i2s_hclk"); if (IS_ERR(i2s->hclk)) { dev_err(&pdev->dev, "Can't retrieve i2s bus clock\n"); return PTR_ERR(i2s->hclk); } i2s->mclk = devm_clk_get(&pdev->dev, "i2s_clk"); if (IS_ERR(i2s->mclk)) { dev_err(&pdev->dev, "Can't retrieve i2s master clock\n"); return PTR_ERR(i2s->mclk); } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(regs)) return PTR_ERR(regs); i2s->regmap = devm_regmap_init_mmio(&pdev->dev, regs, &rockchip_i2s_regmap_config); if (IS_ERR(i2s->regmap)) { dev_err(&pdev->dev, "Failed to initialise managed register map\n"); return PTR_ERR(i2s->regmap); } i2s->playback_dma_data.addr = res->start + I2S_TXDR; i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; i2s->playback_dma_data.maxburst = 16; i2s->capture_dma_data.addr = res->start + I2S_RXDR; i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; i2s->capture_dma_data.maxburst = 16; i2s->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, i2s); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = i2s_runtime_resume(&pdev->dev); if (ret) goto err_pm_disable; } ret = devm_snd_soc_register_component(&pdev->dev, &rockchip_i2s_component, &rockchip_i2s_dai, 1); if (ret) { dev_err(&pdev->dev, "Could not register DAI\n"); goto err_suspend; } ret = snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); if (ret) { dev_err(&pdev->dev, "Could not register PCM\n"); goto err_pcm_register; } return 0; err_pcm_register: snd_dmaengine_pcm_unregister(&pdev->dev); err_suspend: if (!pm_runtime_status_suspended(&pdev->dev)) i2s_runtime_suspend(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); return ret; }
static int tegra20_spdif_platform_probe(struct platform_device *pdev) { struct tegra20_spdif *spdif; struct resource *mem, *memregion, *dmareq; void __iomem *regs; int ret; spdif = devm_kzalloc(&pdev->dev, sizeof(struct tegra20_spdif), GFP_KERNEL); if (!spdif) { dev_err(&pdev->dev, "Can't allocate tegra20_spdif\n"); ret = -ENOMEM; goto err; } dev_set_drvdata(&pdev->dev, spdif); spdif->clk_spdif_out = clk_get(&pdev->dev, "spdif_out"); if (IS_ERR(spdif->clk_spdif_out)) { pr_err("Can't retrieve spdif clock\n"); ret = PTR_ERR(spdif->clk_spdif_out); goto err; } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "No memory resource\n"); ret = -ENODEV; goto err_clk_put; } dmareq = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!dmareq) { dev_err(&pdev->dev, "No DMA resource\n"); ret = -ENODEV; goto err_clk_put; } memregion = devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), DRV_NAME); if (!memregion) { dev_err(&pdev->dev, "Memory region already claimed\n"); ret = -EBUSY; goto err_clk_put; } regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); if (!regs) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto err_clk_put; } spdif->regmap = devm_regmap_init_mmio(&pdev->dev, regs, &tegra20_spdif_regmap_config); if (IS_ERR(spdif->regmap)) { dev_err(&pdev->dev, "regmap init failed\n"); ret = PTR_ERR(spdif->regmap); goto err_clk_put; } spdif->playback_dma_data.addr = mem->start + TEGRA20_SPDIF_DATA_OUT; spdif->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; spdif->playback_dma_data.maxburst = 4; spdif->playback_dma_data.slave_id = dmareq->start; pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = tegra20_spdif_runtime_resume(&pdev->dev); if (ret) goto err_pm_disable; } ret = snd_soc_register_component(&pdev->dev, &tegra20_spdif_component, &tegra20_spdif_dai, 1); if (ret) { dev_err(&pdev->dev, "Could not register DAI: %d\n", ret); ret = -ENOMEM; goto err_suspend; } ret = tegra_pcm_platform_register(&pdev->dev); if (ret) { dev_err(&pdev->dev, "Could not register PCM: %d\n", ret); goto err_unregister_component; } return 0; err_unregister_component: snd_soc_unregister_component(&pdev->dev); err_suspend: if (!pm_runtime_status_suspended(&pdev->dev)) tegra20_spdif_runtime_suspend(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); err_clk_put: clk_put(spdif->clk_spdif_out); err: return ret; }
static int tegra30_dam_probe(struct platform_device *pdev) { struct resource *res, *region; struct tegra30_dam_context *dam; int ret = 0; int clkm_rate; u32 val32; if (pdev->dev.of_node) { of_property_read_u32(pdev->dev.of_node, "nvidia,ahub-dam-id", &val32); pdev->id = (int)val32; } if ((pdev->id < 0) || (pdev->id >= TEGRA30_NR_DAM_IFC)) { dev_err(&pdev->dev, "ID %d out of range\n", pdev->id); return -EINVAL; } dams_cont_info[pdev->id] = devm_kzalloc(&pdev->dev, sizeof(struct tegra30_dam_context), GFP_KERNEL); if (!dams_cont_info[pdev->id]) { dev_err(&pdev->dev, "Can't allocate dam context\n"); ret = -ENOMEM; goto exit; } dams_cont_info[pdev->id]->dev = &pdev->dev; dam = dams_cont_info[pdev->id]; dev_set_drvdata(&pdev->dev, dam); dam->dam_clk = clk_get(&pdev->dev, NULL); if (IS_ERR(dam->dam_clk)) { dev_err(&pdev->dev, "Can't retrieve dam clock\n"); ret = PTR_ERR(dam->dam_clk); goto err_free; } clkm_rate = clk_get_rate(clk_get_parent(dam->dam_clk)); while (clkm_rate > 13000000) clkm_rate >>= 1; clk_set_rate(dam->dam_clk,clkm_rate); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "No memory 0 resource\n"); ret = -ENODEV; goto err_clk_put_dam; } region = devm_request_mem_region(&pdev->dev, res->start, resource_size(res), pdev->name); if (!region) { dev_err(&pdev->dev, "Memory region 0 already claimed\n"); ret = -EBUSY; goto err_clk_put_dam; } dam->damregs = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!dam->damregs) { dev_err(&pdev->dev, "ioremap 0 failed\n"); ret = -ENOMEM; goto err_clk_put_dam; } dam->regmap = devm_regmap_init_mmio(&pdev->dev, dam->damregs, &tegra30_dam_regmap_config); if (IS_ERR(dam->regmap)) { dev_err(&pdev->dev, "regmap init failed\n"); ret = PTR_ERR(dam->regmap); goto err_clk_put_dam; } regcache_cache_only(dam->regmap, true); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = tegra30_dam_runtime_resume(&pdev->dev); if (ret) goto err_pm_disable; } tegra30_dam_debug_add(dam, pdev->id); return 0; err_pm_disable: pm_runtime_disable(&pdev->dev); err_clk_put_dam: clk_put(dam->dam_clk); err_free: dams_cont_info[pdev->id] = NULL; exit: return ret; }
bool i915_pm_runtime_enabled(struct device *dev) { return pm_runtime_enabled(dev); }
static int tegra20_i2s_platform_probe(struct platform_device *pdev) { struct tegra20_i2s *i2s; struct resource *mem; void __iomem *regs; int ret; i2s = devm_kzalloc(&pdev->dev, sizeof(struct tegra20_i2s), GFP_KERNEL); if (!i2s) { ret = -ENOMEM; goto err; } dev_set_drvdata(&pdev->dev, i2s); i2s->dai = tegra20_i2s_dai_template; i2s->dai.name = dev_name(&pdev->dev); i2s->clk_i2s = clk_get(&pdev->dev, NULL); if (IS_ERR(i2s->clk_i2s)) { dev_err(&pdev->dev, "Can't retrieve i2s clock\n"); ret = PTR_ERR(i2s->clk_i2s); goto err; } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); regs = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(regs)) { ret = PTR_ERR(regs); goto err_clk_put; } i2s->regmap = devm_regmap_init_mmio(&pdev->dev, regs, &tegra20_i2s_regmap_config); if (IS_ERR(i2s->regmap)) { dev_err(&pdev->dev, "regmap init failed\n"); ret = PTR_ERR(i2s->regmap); goto err_clk_put; } i2s->capture_dma_data.addr = mem->start + TEGRA20_I2S_FIFO2; i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; i2s->capture_dma_data.maxburst = 4; i2s->playback_dma_data.addr = mem->start + TEGRA20_I2S_FIFO1; i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; i2s->playback_dma_data.maxburst = 4; pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = tegra20_i2s_runtime_resume(&pdev->dev); if (ret) goto err_pm_disable; } ret = snd_soc_register_component(&pdev->dev, &tegra20_i2s_component, &i2s->dai, 1); if (ret) { dev_err(&pdev->dev, "Could not register DAI: %d\n", ret); ret = -ENOMEM; goto err_suspend; } ret = tegra_pcm_platform_register(&pdev->dev); if (ret) { dev_err(&pdev->dev, "Could not register PCM: %d\n", ret); goto err_unregister_component; } return 0; err_unregister_component: snd_soc_unregister_component(&pdev->dev); err_suspend: if (!pm_runtime_status_suspended(&pdev->dev)) tegra20_i2s_runtime_suspend(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); err_clk_put: clk_put(i2s->clk_i2s); err: return ret; }
static int tegra20_spdif_platform_probe(struct platform_device *pdev) { struct tegra20_spdif *spdif; struct resource *mem, *memregion, *dmareq; void __iomem *regs; int ret; u32 reg_val; spdif = devm_kzalloc(&pdev->dev, sizeof(struct tegra20_spdif), GFP_KERNEL); if (!spdif) { dev_err(&pdev->dev, "Can't allocate tegra20_spdif\n"); ret = -ENOMEM; goto err; } dev_set_drvdata(&pdev->dev, spdif); spdif->clk_spdif_out = clk_get(&pdev->dev, "spdif_out"); if (IS_ERR(spdif->clk_spdif_out)) { pr_err("Can't retrieve spdif clock\n"); ret = PTR_ERR(spdif->clk_spdif_out); goto err; } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "No memory resource\n"); ret = -ENODEV; goto err_clk_put; } dmareq = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!dmareq) { dev_err(&pdev->dev, "No DMA resource\n"); ret = -ENODEV; goto err_clk_put; } memregion = devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), DRV_NAME); if (!memregion) { dev_err(&pdev->dev, "Memory region already claimed\n"); ret = -EBUSY; goto err_clk_put; } regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); if (!regs) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto err_clk_put; } spdif->regmap = devm_regmap_init_mmio(&pdev->dev, regs, &tegra20_spdif_regmap_config); if (IS_ERR(spdif->regmap)) { dev_err(&pdev->dev, "regmap init failed\n"); ret = PTR_ERR(spdif->regmap); goto err_clk_put; } spdif->playback_dma_data.addr = mem->start + TEGRA20_SPDIF_DATA_OUT; spdif->playback_dma_data.wrap = 4; spdif->playback_dma_data.width = 32; spdif->playback_dma_data.req_sel = dmareq->start; pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = tegra20_spdif_runtime_resume(&pdev->dev); if (ret) goto err_pm_disable; } clk_enable(spdif->clk_spdif_out); reg_val = tegra20_spdif_read(spdif, TEGRA20_SPDIF_DATA_FIFO_CSR); reg_val &= ~TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_MASK; reg_val |= TEGRA20_SPDIF_DATA_FIFO_CSR_TX_ATN_LVL_TU4_WORD_FULL; tegra20_spdif_write(spdif, TEGRA20_SPDIF_DATA_FIFO_CSR, reg_val); clk_disable(spdif->clk_spdif_out); ret = snd_soc_register_dai(&pdev->dev, &tegra20_spdif_dai); if (ret) { dev_err(&pdev->dev, "Could not register DAI: %d\n", ret); ret = -ENOMEM; goto err_suspend; } ret = tegra_pcm_platform_register(&pdev->dev); if (ret) { dev_err(&pdev->dev, "Could not register PCM: %d\n", ret); goto err_unregister_dai; } return 0; err_unregister_dai: snd_soc_unregister_dai(&pdev->dev); err_suspend: if (!pm_runtime_status_suspended(&pdev->dev)) tegra20_spdif_runtime_suspend(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); err_clk_put: clk_put(spdif->clk_spdif_out); err: return ret; }