/*
 * exynos_cpufreq_cluster1_init() - DVFS setup for the big (Atlas/CA57)
 * cluster: builds the ASV voltage table, acquires and re-parents the Atlas
 * clocks, fills in @info with the exynos7420 CA57 tables/callbacks, and
 * optionally arms the SMPL-warn notifier if the PMIC DT node enables it.
 *
 * Returns 0 on success, -EINVAL on any clock setup failure.
 */
int __init exynos_cpufreq_cluster1_init(struct exynos_dvfs_info *info)
{
	unsigned long rate;
	struct device_node *pmic_node;
	int ret, tmp;

	set_volt_table_CA57();

	mout_atlas_pll = clk_get(NULL, "mout_atlas_pll");
	if (IS_ERR(mout_atlas_pll)) {
		pr_err("failed get mout_atlas_pll clk\n");
		goto err_mout_atlas_pll;
	}

	mout_atlas = clk_get(NULL, "mout_atlas");
	if (IS_ERR(mout_atlas)) {
		pr_err("failed get mout_atlas clk\n");
		goto err_mout_atlas;
	}

	if (clk_set_parent(mout_atlas, mout_atlas_pll)) {
		pr_err("Unable to set parent %s of clock %s.\n",
				mout_atlas_pll->name, mout_atlas->name);
		goto err_clk_set_parent_atlas;
	}

	mout_bus0_pll_atlas = clk_get(NULL, "mout_bus0_pll_atlas");
	if (IS_ERR(mout_bus0_pll_atlas)) {
		pr_err("failed get mout_bus0_pll_atlas clk\n");
		goto err_mout_bus0_pll_atlas;
	}

	/*
	 * Enable the clocks one at a time so a failure of the second
	 * enable can undo the first; the previous short-circuit "||"
	 * left mout_atlas_pll prepared/enabled on that path.
	 */
	if (clk_prepare_enable(mout_atlas_pll)) {
		pr_err("Unable to enable Atlas clocks \n");
		goto err_clk_prepare_enable;
	}
	if (clk_prepare_enable(mout_atlas)) {
		pr_err("Unable to enable Atlas clocks \n");
		clk_disable_unprepare(mout_atlas_pll);
		goto err_clk_prepare_enable;
	}

	rate = clk_get_rate(mout_bus0_pll_atlas) / 1000;
	info->mpll_freq_khz = rate;
	/* L17 is the level considered safe while the PLL is re-locking */
	info->pll_safe_idx = L17;
	info->max_support_idx = max_support_idx_CA57;
	info->min_support_idx = min_support_idx_CA57;

	/* booting frequency is 1.7GHz */
	info->boot_cpu_min_qos = exynos7420_freq_table_CA57[L8].frequency;
	info->boot_cpu_max_qos = exynos7420_freq_table_CA57[L8].frequency;
	info->bus_table = exynos7420_bus_table_CA57;
	info->cpu_clk = mout_atlas_pll;

	/* reboot limit frequency is 800MHz */
	info->reboot_limit_freq = exynos7420_freq_table_CA57[L17].frequency;

	info->volt_table = exynos7420_volt_table_CA57;
	info->abb_table = NULL; /* exynos7420_abb_table_CA57 */
	info->freq_table = exynos7420_freq_table_CA57;
	info->set_freq = exynos7420_set_frequency_CA57;
	info->need_apll_change = exynos7420_pms_change_CA57;
	info->is_alive = exynos7420_is_alive_CA57;
	info->set_ema = exynos7420_set_ema_CA57;

	/* SMPL (sudden momentary power loss) warn enable comes from PMIC DT */
	pmic_node = of_find_compatible_node(NULL, NULL,
					"samsung,s2mps15-pmic");
	if (!pmic_node) {
		pr_err("%s: failed to get pmic dt_node\n", __func__);
	} else {
		ret = of_property_read_u32(pmic_node, "smpl_warn_en",
						&en_smpl_warn);
		if (ret)
			pr_err("%s: failed to get Property of smpl_warn_en\n",
					__func__);
	}

	if (en_smpl_warn) {
		info->check_smpl = exynos7420_check_smpl_CA57;

		/* ATLAS_RATIO_SMPL */
		tmp = __raw_readl(EXYNOS7420_ATLAS_SMPL_CTRL0);
		tmp &= 0x7F;
		tmp |= 0x44;
		__raw_writel(tmp, EXYNOS7420_ATLAS_SMPL_CTRL0);
		pr_info("%s SMPL_WARN ENABLE (DIV:%d) ", __func__,
				tmp & 0x3F);
		exynos_cpufreq_smpl_warn_register_notifier(
				&exynos7420_cpufreq_smpl_warn_notifier);
	}

	return 0;

err_clk_prepare_enable:
	/* drop the reference taken just above (previously leaked) */
	clk_put(mout_bus0_pll_atlas);
err_mout_bus0_pll_atlas:
err_clk_set_parent_atlas:
	clk_put(mout_atlas);
err_mout_atlas:
	clk_put(mout_atlas_pll);
err_mout_atlas_pll:
	pr_debug("%s: failed initialization\n", __func__);
	return -EINVAL;
}
/*
 * sdhci_tegra_probe() - bind the Tegra SDHCI controller.
 *
 * Matches SoC data from DT, initializes the platform host, requests the
 * optional power / card-detect / write-protect GPIOs (plus a CD edge IRQ),
 * enables the controller clock and registers the SDHCI host.  The error
 * labels unwind in strict reverse order of acquisition.
 */
static int sdhci_tegra_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct sdhci_tegra_soc_data *soc_data;
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_tegra *tegra_host;
	struct clk *clk;
	int rc;

	match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
	if (!match)
		return -EINVAL;
	soc_data = match->data;

	host = sdhci_pltfm_init(pdev, soc_data->pdata);
	if (IS_ERR(host))
		return PTR_ERR(host);
	pltfm_host = sdhci_priv(host);

	/* devm allocation: released automatically when the device unbinds */
	tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL);
	if (!tegra_host) {
		dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n");
		rc = -ENOMEM;
		goto err_alloc_tegra_host;
	}
	tegra_host->soc_data = soc_data;
	pltfm_host->priv = tegra_host;

	/* fills tegra_host->{power,cd,wp}_gpio and is_8bit from DT */
	sdhci_tegra_parse_dt(&pdev->dev, tegra_host);

	if (gpio_is_valid(tegra_host->power_gpio)) {
		rc = gpio_request(tegra_host->power_gpio, "sdhci_power");
		if (rc) {
			dev_err(mmc_dev(host->mmc),
				"failed to allocate power gpio\n");
			goto err_power_req;
		}
		/* drive slot power on immediately */
		gpio_direction_output(tegra_host->power_gpio, 1);
	}

	if (gpio_is_valid(tegra_host->cd_gpio)) {
		rc = gpio_request(tegra_host->cd_gpio, "sdhci_cd");
		if (rc) {
			dev_err(mmc_dev(host->mmc),
				"failed to allocate cd gpio\n");
			goto err_cd_req;
		}
		gpio_direction_input(tegra_host->cd_gpio);

		/* interrupt on both insert and remove edges */
		rc = request_irq(gpio_to_irq(tegra_host->cd_gpio),
				 carddetect_irq,
				 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
				 mmc_hostname(host->mmc), host);
		if (rc) {
			dev_err(mmc_dev(host->mmc), "request irq error\n");
			goto err_cd_irq_req;
		}
	}

	if (gpio_is_valid(tegra_host->wp_gpio)) {
		rc = gpio_request(tegra_host->wp_gpio, "sdhci_wp");
		if (rc) {
			dev_err(mmc_dev(host->mmc),
				"failed to allocate wp gpio\n");
			goto err_wp_req;
		}
		gpio_direction_input(tegra_host->wp_gpio);
	}

	clk = clk_get(mmc_dev(host->mmc), NULL);
	if (IS_ERR(clk)) {
		dev_err(mmc_dev(host->mmc), "clk err\n");
		rc = PTR_ERR(clk);
		goto err_clk_get;
	}
	clk_prepare_enable(clk);
	pltfm_host->clk = clk;

	if (tegra_host->is_8bit)
		host->mmc->caps |= MMC_CAP_8_BIT_DATA;

	rc = sdhci_add_host(host);
	if (rc)
		goto err_add_host;

	return 0;

err_add_host:
	clk_disable_unprepare(pltfm_host->clk);
	clk_put(pltfm_host->clk);
err_clk_get:
	if (gpio_is_valid(tegra_host->wp_gpio))
		gpio_free(tegra_host->wp_gpio);
err_wp_req:
	if (gpio_is_valid(tegra_host->cd_gpio))
		free_irq(gpio_to_irq(tegra_host->cd_gpio), host);
err_cd_irq_req:
	if (gpio_is_valid(tegra_host->cd_gpio))
		gpio_free(tegra_host->cd_gpio);
err_cd_req:
	if (gpio_is_valid(tegra_host->power_gpio))
		gpio_free(tegra_host->power_gpio);
err_power_req:
err_alloc_tegra_host:
	sdhci_pltfm_free(pdev);
	return rc;
}
/** * vpbe_initialize() - Initialize the vpbe display controller * @vpbe_dev - vpbe device ptr * * Master frame buffer device drivers calls this to initialize vpbe * display controller. This will then registers v4l2 device and the sub * devices and sets a current encoder sub device for display. v4l2 display * device driver is the master and frame buffer display device driver is * the slave. Frame buffer display driver checks the initialized during * probe and exit if not initialized. Returns status. */ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev) { struct encoder_config_info *enc_info; struct amp_config_info *amp_info; struct v4l2_subdev **enc_subdev; struct osd_state *osd_device; struct i2c_adapter *i2c_adap; int num_encoders; int ret = 0; int err; int i; /* * v4l2 abd FBDev frame buffer devices will get the vpbe_dev pointer * from the platform device by iteration of platform drivers and * matching with device name */ if (NULL == vpbe_dev || NULL == dev) { printk(KERN_ERR "Null device pointers.\n"); return -ENODEV; } if (vpbe_dev->initialized) return 0; mutex_lock(&vpbe_dev->lock); if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) { /* We have dac clock available for platform */ vpbe_dev->dac_clk = clk_get(vpbe_dev->pdev, "vpss_dac"); if (IS_ERR(vpbe_dev->dac_clk)) { ret = PTR_ERR(vpbe_dev->dac_clk); goto fail_mutex_unlock; } if (clk_prepare_enable(vpbe_dev->dac_clk)) { ret = -ENODEV; goto fail_mutex_unlock; } } /* first enable vpss clocks */ vpss_enable_clock(VPSS_VPBE_CLOCK, 1); /* First register a v4l2 device */ ret = v4l2_device_register(dev, &vpbe_dev->v4l2_dev); if (ret) { v4l2_err(dev->driver, "Unable to register v4l2 device.\n"); goto fail_clk_put; } v4l2_info(&vpbe_dev->v4l2_dev, "vpbe v4l2 device registered\n"); err = bus_for_each_dev(&platform_bus_type, NULL, vpbe_dev, platform_device_get); if (err < 0) { ret = err; goto fail_dev_unregister; } vpbe_dev->venc = venc_sub_dev_init(&vpbe_dev->v4l2_dev, 
vpbe_dev->cfg->venc.module_name); /* register venc sub device */ if (vpbe_dev->venc == NULL) { v4l2_err(&vpbe_dev->v4l2_dev, "vpbe unable to init venc sub device\n"); ret = -ENODEV; goto fail_dev_unregister; } /* initialize osd device */ osd_device = vpbe_dev->osd_device; if (NULL != osd_device->ops.initialize) { err = osd_device->ops.initialize(osd_device); if (err) { v4l2_err(&vpbe_dev->v4l2_dev, "unable to initialize the OSD device"); err = -ENOMEM; goto fail_dev_unregister; } } /* * Register any external encoders that are configured. At index 0 we * store venc sd index. */ num_encoders = vpbe_dev->cfg->num_ext_encoders + 1; vpbe_dev->encoders = kmalloc( sizeof(struct v4l2_subdev *)*num_encoders, GFP_KERNEL); if (NULL == vpbe_dev->encoders) { v4l2_err(&vpbe_dev->v4l2_dev, "unable to allocate memory for encoders sub devices"); ret = -ENOMEM; goto fail_dev_unregister; } i2c_adap = i2c_get_adapter(vpbe_dev->cfg->i2c_adapter_id); for (i = 0; i < (vpbe_dev->cfg->num_ext_encoders + 1); i++) { if (i == 0) { /* venc is at index 0 */ enc_subdev = &vpbe_dev->encoders[i]; *enc_subdev = vpbe_dev->venc; continue; } enc_info = &vpbe_dev->cfg->ext_encoders[i]; if (enc_info->is_i2c) { enc_subdev = &vpbe_dev->encoders[i]; *enc_subdev = v4l2_i2c_new_subdev_board( &vpbe_dev->v4l2_dev, i2c_adap, &enc_info->board_info, NULL); if (*enc_subdev) v4l2_info(&vpbe_dev->v4l2_dev, "v4l2 sub device %s registered\n", enc_info->module_name); else { v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s" " failed to register", enc_info->module_name); ret = -ENODEV; goto fail_kfree_encoders; } } else v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders" " currently not supported"); } /* Add amplifier subdevice for dm365 */ if ((strcmp(vpbe_dev->cfg->module_name, "dm365-vpbe-display") == 0) && vpbe_dev->cfg->amp != NULL) { amp_info = vpbe_dev->cfg->amp; if (amp_info->is_i2c) { vpbe_dev->amp = v4l2_i2c_new_subdev_board( &vpbe_dev->v4l2_dev, i2c_adap, &_info->board_info, NULL); if (!vpbe_dev->amp) { 
v4l2_err(&vpbe_dev->v4l2_dev, "amplifier %s failed to register", amp_info->module_name); ret = -ENODEV; goto fail_kfree_encoders; } v4l2_info(&vpbe_dev->v4l2_dev, "v4l2 sub device %s registered\n", amp_info->module_name); } else { vpbe_dev->amp = NULL; v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c amplifiers" " currently not supported"); } } else { vpbe_dev->amp = NULL; } /* set the current encoder and output to that of venc by default */ vpbe_dev->current_sd_index = 0; vpbe_dev->current_out_index = 0; mutex_unlock(&vpbe_dev->lock); printk(KERN_NOTICE "Setting default output to %s\n", def_output); ret = vpbe_set_default_output(vpbe_dev); if (ret) { v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s", def_output); return ret; } printk(KERN_NOTICE "Setting default mode to %s\n", def_mode); ret = vpbe_set_default_mode(vpbe_dev); if (ret) { v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s", def_mode); return ret; } vpbe_dev->initialized = 1; /* TBD handling of bootargs for default output and mode */ return 0; fail_kfree_encoders: kfree(vpbe_dev->encoders); fail_dev_unregister: v4l2_device_unregister(&vpbe_dev->v4l2_dev); fail_clk_put: if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) { clk_disable_unprepare(vpbe_dev->dac_clk); clk_put(vpbe_dev->dac_clk); } fail_mutex_unlock: mutex_unlock(&vpbe_dev->lock); return ret; }
int s5p_mfc_power_off(struct s5p_mfc_dev *dev) { #if defined(CONFIG_SOC_EXYNOS5430) || defined(CONFIG_SOC_EXYNOS5433) struct clk *clk_child = NULL; struct clk *clk_parent = NULL; #endif #if defined(CONFIG_SOC_EXYNOS5430) struct clk *clk_fout_mphy_pll = NULL; #endif #if defined(CONFIG_SOC_EXYNOS5433) struct clk *clk_old_parent = NULL; #endif int ret; MFC_TRACE_DEV("++ Power off\n"); #if defined(CONFIG_SOC_EXYNOS5422) bts_initialize("pd-mfc", false); #endif #if defined(CONFIG_SOC_EXYNOS5430) if (dev->id == 0) { clk_fout_mphy_pll = clk_get(dev->device, "fout_mphy_pll"); if (IS_ERR(clk_fout_mphy_pll)) { pr_err("failed to get %s clock\n", __clk_get_name(clk_fout_mphy_pll)); return PTR_ERR(clk_fout_mphy_pll); } clk_child = clk_get(dev->device, "mout_mphy_pll"); if (IS_ERR(clk_child)) { clk_put(clk_fout_mphy_pll); pr_err("failed to get %s clock\n", __clk_get_name(clk_child)); return PTR_ERR(clk_child); } clk_parent = clk_get(dev->device, "fin_pll"); if (IS_ERR(clk_parent)) { clk_put(clk_child); clk_put(clk_fout_mphy_pll); pr_err("failed to get %s clock\n", __clk_get_name(clk_parent)); return PTR_ERR(clk_parent); } /* 1. Set parent as OSC */ clk_set_parent(clk_child, clk_parent); /* 2. 
Disable MPHY_PLL */ clk_disable_unprepare(clk_fout_mphy_pll); } #endif #if defined(CONFIG_SOC_EXYNOS5433) clk_old_parent = clk_get(dev->device, "aclk_mfc_400"); if (IS_ERR(clk_old_parent)) { pr_err("failed to get %s clock\n", __clk_get_name(clk_old_parent)); return PTR_ERR(clk_old_parent); } clk_child = clk_get(dev->device, "mout_aclk_mfc_400_user"); if (IS_ERR(clk_child)) { clk_put(clk_old_parent); pr_err("failed to get %s clock\n", __clk_get_name(clk_child)); return PTR_ERR(clk_child); } clk_parent = clk_get(dev->device, "oscclk"); if (IS_ERR(clk_parent)) { clk_put(clk_child); clk_put(clk_old_parent); pr_err("failed to get %s clock\n", __clk_get_name(clk_parent)); return PTR_ERR(clk_parent); } /* before set mux register, all source clock have to enabled */ clk_prepare_enable(clk_parent); if (clk_set_parent(clk_child, clk_parent)) { pr_err("Unable to set parent %s of clock %s \n", __clk_get_name(clk_parent), __clk_get_name(clk_child)); } clk_disable_unprepare(clk_parent); clk_disable_unprepare(clk_old_parent); clk_put(clk_child); clk_put(clk_parent); clk_put(clk_old_parent); /* expected mfc related ref clock value be set 0 */ #endif atomic_set(&dev->pm.power, 0); ret = pm_runtime_put_sync(dev->pm.device); MFC_TRACE_DEV("-- Power off: ret(%d)\n", ret); return ret; }
/*
 * img_spdif_out_probe() - bind the IMG S/PDIF output controller.
 *
 * Maps the register window, takes the reset line and the "sys"/"ref"
 * clocks (all devm-managed), enables the system clock, resets the block,
 * enables runtime PM (with a manual resume when runtime PM is compiled
 * out), then registers the ASoC component and the DMA engine PCM.
 */
static int img_spdif_out_probe(struct platform_device *pdev)
{
	struct img_spdif_out *spdif;
	struct resource *res;
	void __iomem *base;
	int ret;
	struct device *dev = &pdev->dev;

	spdif = devm_kzalloc(&pdev->dev, sizeof(*spdif), GFP_KERNEL);
	if (!spdif)
		return -ENOMEM;

	platform_set_drvdata(pdev, spdif);

	spdif->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	spdif->base = base;

	spdif->rst = devm_reset_control_get_exclusive(&pdev->dev, "rst");
	if (IS_ERR(spdif->rst)) {
		/* stay quiet on probe deferral; it is not an error */
		if (PTR_ERR(spdif->rst) != -EPROBE_DEFER)
			dev_err(&pdev->dev, "No top level reset found\n");
		return PTR_ERR(spdif->rst);
	}

	spdif->clk_sys = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(spdif->clk_sys)) {
		if (PTR_ERR(spdif->clk_sys) != -EPROBE_DEFER)
			dev_err(dev, "Failed to acquire clock 'sys'\n");
		return PTR_ERR(spdif->clk_sys);
	}

	spdif->clk_ref = devm_clk_get(&pdev->dev, "ref");
	if (IS_ERR(spdif->clk_ref)) {
		if (PTR_ERR(spdif->clk_ref) != -EPROBE_DEFER)
			dev_err(dev, "Failed to acquire clock 'ref'\n");
		return PTR_ERR(spdif->clk_ref);
	}

	ret = clk_prepare_enable(spdif->clk_sys);
	if (ret)
		return ret;

	img_spdif_out_writel(spdif, IMG_SPDIF_OUT_CTL_FS_MASK,
			     IMG_SPDIF_OUT_CTL);

	img_spdif_out_reset(spdif);
	pm_runtime_enable(&pdev->dev);
	/* without runtime PM the device must be resumed by hand */
	if (!pm_runtime_enabled(&pdev->dev)) {
		ret = img_spdif_out_resume(&pdev->dev);
		if (ret)
			goto err_pm_disable;
	}

	spin_lock_init(&spdif->lock);

	spdif->dma_data.addr = res->start + IMG_SPDIF_OUT_TX_FIFO;
	spdif->dma_data.addr_width = 4;
	spdif->dma_data.maxburst = 4;

	ret = devm_snd_soc_register_component(&pdev->dev,
			&img_spdif_out_component,
			&img_spdif_out_dai, 1);
	if (ret)
		goto err_suspend;

	ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
	if (ret)
		goto err_suspend;

	dev_dbg(&pdev->dev, "Probe successful\n");

	return 0;

err_suspend:
	/* undo the manual resume unless runtime PM already suspended us */
	if (!pm_runtime_status_suspended(&pdev->dev))
		img_spdif_out_suspend(&pdev->dev);
err_pm_disable:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(spdif->clk_sys);

	return ret;
}
/* Search EMAC board, allocate space and register it */ static int emac_probe(struct vmm_device *pdev, const struct vmm_devtree_nodeid *devid) { struct device_node *np = pdev->node; struct emac_board_info *db; struct net_device *ndev; int ret = 0; const char *mac_addr; virtual_addr_t reg_addr; ndev = alloc_etherdev(sizeof(struct emac_board_info)); if (!ndev) { dev_err(pdev, "%s: could not allocate device.\n", __func__); return -ENOMEM; } strlcpy(ndev->name, pdev->name, sizeof(ndev->name)); SET_NETDEV_DEV(ndev, pdev); db = netdev_priv(ndev); memset(db, 0, sizeof(*db)); db->ndev = ndev; db->pdev = pdev; spin_lock_init(&db->lock); if ((ret = vmm_devtree_request_regmap(np, ®_addr, 0, "Sun4i EMAC"))) { vmm_printf("%s: Failed to ioreamp\n", __func__); return -ENOMEM; } db->membase = (void *) reg_addr; /* fill in parameters for net-dev structure */ ndev->base_addr = (unsigned long)db->membase; ret = vmm_devtree_irq_get(np, &ndev->irq, 0); if (ret) { vmm_printf("%s: No irq resource\n", __func__); goto out; } db->clk = clk_get(pdev, NULL); if (IS_ERR(db->clk)) goto out; clk_prepare_enable(db->clk); db->phy_node = vmm_devtree_parse_phandle(np, "phy", 0); if (!db->phy_node) { dev_err(pdev, "%s: no associated PHY\n", __func__); ret = -ENODEV; goto out; } /* Read MAC-address from DT */ mac_addr = of_get_mac_address(np); if (mac_addr) memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); /* Check if the MAC address is valid, if not get a random one */ if (!is_valid_ether_addr(ndev->dev_addr)) { eth_hw_addr_random(ndev); dev_info(pdev, "using random MAC address: "); print_mac_address_fmt(ndev->dev_addr); } db->emacrx_completed_flag = 1; emac_powerup(ndev); emac_reset(db); ether_setup(ndev); ndev->netdev_ops = &emac_netdev_ops; ndev->watchdog_timeo = msecs_to_jiffies(watchdog); ndev->ethtool_ops = &emac_ethtool_ops; platform_set_drvdata(pdev, ndev); /* Carrier starts down, phylib will bring it up */ netif_carrier_off(ndev); ret = register_netdev(ndev); if (ret) { dev_err(pdev, "%s: 
Registering netdev failed!\n", __func__); ret = -ENODEV; goto out; } dev_info(pdev, "%s: at %p, IRQ %d MAC: ", ndev->name, db->membase, ndev->irq); print_mac_address_fmt(ndev->dev_addr); return 0; out: dev_err(pdev, "%s: not found (%d).\n", __func__, ret); free_netdev(ndev); return ret; }
/*
 * Set the DDR to either 528MHz or 400MHz for iMX6qd
 * or 400MHz for iMX6dl.
 *
 * @high_bus_freq: nonzero requests the full high rate; zero allows the
 * medium rate where supported.  Returns 0 (also when the request is a
 * no-op because scaling is inactive, suspended, or already high).
 */
static int set_high_bus_freq(int high_bus_freq)
{
	struct clk *periph_clk_parent;

	/* a queued low-bus transition would race with this one */
	if (bus_freq_scaling_initialized && bus_freq_scaling_is_active)
		cancel_delayed_work_sync(&low_bus_freq_handler);

	if (busfreq_suspended)
		return 0;

	if (cpu_is_imx6q())
		periph_clk_parent = pll2_bus;
	else
		periph_clk_parent = pll2_400;

	if (!bus_freq_scaling_initialized || !bus_freq_scaling_is_active)
		return 0;

	if (high_bus_freq_mode)
		return 0;

	/* medium bus freq is only supported for MX6DQ */
	if (med_bus_freq_mode && !high_bus_freq)
		return 0;

	if (low_bus_freq_mode || ultra_low_bus_freq_mode)
		busfreq_notify(LOW_BUSFREQ_EXIT);

	/* pll3 must stay running while it temporarily parents periph */
	if (cpu_is_imx6())
		clk_prepare_enable(pll3);

	if (cpu_is_imx7d())
		exit_lpm_imx7d();
	else if (cpu_is_imx6sl())
		exit_lpm_imx6sl();
	else if (cpu_is_imx6sx() || cpu_is_imx6ul())
		exit_lpm_imx6_up();
	else {
		if (high_bus_freq) {
			clk_prepare_enable(pll2_400);
			update_ddr_freq_imx_smp(ddr_normal_rate);
			/* Make sure periph clk's parent also got updated */
			imx_clk_set_parent(periph_clk2_sel, pll3);
			imx_clk_set_parent(periph_pre_clk, periph_clk_parent);
			imx_clk_set_parent(periph_clk, periph_pre_clk);
			if (cpu_is_imx6dl()) {
				/* Set axi to pll3_pfd1_540m */
				imx_clk_set_parent(axi_alt_sel_clk,
						   pll3_pfd1_540m);
				imx_clk_set_parent(axi_sel_clk,
						   axi_alt_sel_clk);
			}
			clk_disable_unprepare(pll2_400);
		} else {
			update_ddr_freq_imx_smp(ddr_med_rate);
			/* Make sure periph clk's parent also got updated */
			imx_clk_set_parent(periph_clk2_sel, pll3);
			imx_clk_set_parent(periph_pre_clk, pll2_400);
			imx_clk_set_parent(periph_clk, periph_pre_clk);
		}
		/* audio mode held an extra pll2_400 reference; drop it */
		if (audio_bus_freq_mode)
			clk_disable_unprepare(pll2_400);
	}

	high_bus_freq_mode = 1;
	med_bus_freq_mode = 0;
	low_bus_freq_mode = 0;
	audio_bus_freq_mode = 0;
	cur_bus_freq_mode = BUS_FREQ_HIGH;

	if (cpu_is_imx6())
		clk_disable_unprepare(pll3);

	if (high_bus_freq_mode)
		dev_dbg(busfreq_dev, "Bus freq set to high mode. Count:\
high %d, med %d, audio %d\n",
			high_bus_count, med_bus_count, audio_bus_count);
	if (med_bus_freq_mode)
		dev_dbg(busfreq_dev, "Bus freq set to med mode. Count:\
high %d, med %d, audio %d\n",
			high_bus_count, med_bus_count, audio_bus_count);

	return 0;
}
static int dspi_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct spi_master *master; struct fsl_dspi *dspi; struct resource *res; int ret = 0, cs_num, bus_num; master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi)); if (!master) return -ENOMEM; dspi = spi_master_get_devdata(master); dspi->pdev = pdev; dspi->bitbang.master = master; dspi->bitbang.chipselect = dspi_chipselect; dspi->bitbang.setup_transfer = dspi_setup_transfer; dspi->bitbang.txrx_bufs = dspi_txrx_transfer; dspi->bitbang.master->setup = dspi_setup; dspi->bitbang.master->dev.of_node = pdev->dev.of_node; master->mode_bits = SPI_CPOL | SPI_CPHA; master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) | SPI_BPW_MASK(16); ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num); if (ret < 0) { dev_err(&pdev->dev, "can't get spi-num-chipselects\n"); goto out_master_put; } master->num_chipselect = cs_num; ret = of_property_read_u32(np, "bus-num", &bus_num); if (ret < 0) { dev_err(&pdev->dev, "can't get bus-num\n"); goto out_master_put; } master->bus_num = bus_num; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dspi->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dspi->base)) { ret = PTR_ERR(dspi->base); goto out_master_put; } dspi->irq = platform_get_irq(pdev, 0); if (dspi->irq < 0) { dev_err(&pdev->dev, "can't get platform irq\n"); ret = dspi->irq; goto out_master_put; } ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0, pdev->name, dspi); if (ret < 0) { dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n"); goto out_master_put; } dspi->clk = devm_clk_get(&pdev->dev, "dspi"); if (IS_ERR(dspi->clk)) { ret = PTR_ERR(dspi->clk); dev_err(&pdev->dev, "unable to get clock\n"); goto out_master_put; } clk_prepare_enable(dspi->clk); init_waitqueue_head(&dspi->waitq); platform_set_drvdata(pdev, master); ret = spi_bitbang_start(&dspi->bitbang); if (ret != 0) { dev_err(&pdev->dev, "Problem registering DSPI master\n"); goto 
out_clk_put; } pr_info(KERN_INFO "Freescale DSPI master initialized\n"); return ret; out_clk_put: clk_disable_unprepare(dspi->clk); out_master_put: spi_master_put(master); return ret; }
/*
 * dw_mipi_dsi_phy_init() - program the Synopsys DesignWare MIPI D-PHY.
 *
 * Writes the PLL, bias, termination and lane-timing test registers via
 * the PHY test interface, then releases the PHY from reset and polls for
 * PLL lock and the clock lane's stop state.  The register addresses and
 * the exact write order follow the vendor programming sequence — do not
 * reorder.  Returns 0 on success or a negative error code.
 */
static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
{
	int ret, testdin, vco, val;

	/* VCO range code: one step per 200 Mbps above ~100 Mbps */
	vco = (dsi->lane_mbps < 200) ? 0 : (dsi->lane_mbps + 100) / 200;

	testdin = max_mbps_to_testdin(dsi->lane_mbps);
	if (testdin < 0) {
		DRM_DEV_ERROR(dsi->dev,
			      "failed to get testdin for %dmbps lane clock\n",
			      dsi->lane_mbps);
		return testdin;
	}

	/* Start by clearing PHY state */
	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLR);
	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLR);
	dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLR);

	ret = clk_prepare_enable(dsi->phy_cfg_clk);
	if (ret) {
		DRM_DEV_ERROR(dsi->dev, "Failed to enable phy_cfg_clk\n");
		return ret;
	}

	/* PLL configuration: VCO range, charge pump, loop filter */
	dw_mipi_dsi_phy_write(dsi, 0x10, BYPASS_VCO_RANGE |
					 VCO_RANGE_CON_SEL(vco) |
					 VCO_IN_CAP_CON_LOW |
					 REF_BIAS_CUR_SEL);

	dw_mipi_dsi_phy_write(dsi, 0x11, CP_CURRENT_3MA);
	dw_mipi_dsi_phy_write(dsi, 0x12, CP_PROGRAM_EN | LPF_PROGRAM_EN |
					 LPF_RESISTORS_20_KOHM);

	/* HS frequency range from the requested lane rate */
	dw_mipi_dsi_phy_write(dsi, 0x44, HSFREQRANGE_SEL(testdin));

	/* input and feedback dividers (feedback split low/high halves) */
	dw_mipi_dsi_phy_write(dsi, 0x17, INPUT_DIVIDER(dsi->input_div));
	dw_mipi_dsi_phy_write(dsi, 0x18, LOOP_DIV_LOW_SEL(dsi->feedback_div) |
					 LOW_PROGRAM_EN);
	dw_mipi_dsi_phy_write(dsi, 0x18, LOOP_DIV_HIGH_SEL(dsi->feedback_div) |
					 HIGH_PROGRAM_EN);
	dw_mipi_dsi_phy_write(dsi, 0x19, PLL_LOOP_DIV_EN | PLL_INPUT_DIV_EN);

	/* bias / bandgap (low then high program halves) */
	dw_mipi_dsi_phy_write(dsi, 0x22, LOW_PROGRAM_EN |
					 BIASEXTR_SEL(BIASEXTR_127_7));
	dw_mipi_dsi_phy_write(dsi, 0x22, HIGH_PROGRAM_EN |
					 BANDGAP_SEL(BANDGAP_96_10));

	dw_mipi_dsi_phy_write(dsi, 0x20, POWER_CONTROL | INTERNAL_REG_CURRENT |
					 BIAS_BLOCK_ON | BANDGAP_ON);

	/* termination resistors */
	dw_mipi_dsi_phy_write(dsi, 0x21, TER_RESISTOR_LOW | TER_CAL_DONE |
					 SETRD_MAX | TER_RESISTORS_ON);
	dw_mipi_dsi_phy_write(dsi, 0x21, TER_RESISTOR_HIGH | LEVEL_SHIFTERS_ON |
					 SETRD_MAX | POWER_MANAGE |
					 TER_RESISTORS_ON);

	/* clock lane timing (values in ns / UI converted to byte clocks) */
	dw_mipi_dsi_phy_write(dsi, 0x60, TLP_PROGRAM_EN | ns2bc(dsi, 500));
	dw_mipi_dsi_phy_write(dsi, 0x61, THS_PRE_PROGRAM_EN | ns2ui(dsi, 40));
	dw_mipi_dsi_phy_write(dsi, 0x62, THS_ZERO_PROGRAM_EN |
					 ns2bc(dsi, 300));
	dw_mipi_dsi_phy_write(dsi, 0x63, THS_PRE_PROGRAM_EN | ns2ui(dsi, 100));
	dw_mipi_dsi_phy_write(dsi, 0x64, BIT(5) | ns2bc(dsi, 100));
	dw_mipi_dsi_phy_write(dsi, 0x65, BIT(5) | (ns2bc(dsi, 60) + 7));

	/* data lane timing */
	dw_mipi_dsi_phy_write(dsi, 0x70, TLP_PROGRAM_EN | ns2bc(dsi, 500));
	dw_mipi_dsi_phy_write(dsi, 0x71,
			      THS_PRE_PROGRAM_EN | (ns2ui(dsi, 50) + 5));
	dw_mipi_dsi_phy_write(dsi, 0x72,
			      THS_ZERO_PROGRAM_EN | (ns2bc(dsi, 140) + 2));
	dw_mipi_dsi_phy_write(dsi, 0x73,
			      THS_PRE_PROGRAM_EN | (ns2ui(dsi, 60) + 8));
	dw_mipi_dsi_phy_write(dsi, 0x74, BIT(5) | ns2bc(dsi, 100));

	/* release reset/shutdown and start the PLL */
	dsi_write(dsi, DSI_PHY_RSTZ, PHY_ENFORCEPLL | PHY_ENABLECLK |
				     PHY_UNRSTZ | PHY_UNSHUTDOWNZ);

	ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
				 val, val & LOCK, 1000, PHY_STATUS_TIMEOUT_US);
	if (ret < 0) {
		DRM_DEV_ERROR(dsi->dev, "failed to wait for phy lock state\n");
		goto phy_init_end;
	}

	ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
				 val, val & STOP_STATE_CLK_LANE, 1000,
				 PHY_STATUS_TIMEOUT_US);
	if (ret < 0)
		DRM_DEV_ERROR(dsi->dev,
			      "failed to wait for phy clk lane stop state\n");

phy_init_end:
	/* the cfg clock is only needed while poking test registers */
	clk_disable_unprepare(dsi->phy_cfg_clk);

	return ret;
}
static int ingenic_uart_probe(struct platform_device *pdev) { struct uart_8250_port uart = {}; struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); struct ingenic_uart_data *data; const struct ingenic_uart_config *cdata; const struct of_device_id *match; int err, line; match = of_match_device(of_match, &pdev->dev); if (!match) { dev_err(&pdev->dev, "Error: No device match found\n"); return -ENODEV; } cdata = match->data; if (!regs || !irq) { dev_err(&pdev->dev, "no registers/irq defined\n"); return -EINVAL; } data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; spin_lock_init(&uart.port.lock); uart.port.type = PORT_16550A; uart.port.flags = UPF_SKIP_TEST | UPF_IOREMAP | UPF_FIXED_TYPE; uart.port.iotype = UPIO_MEM; uart.port.mapbase = regs->start; uart.port.regshift = 2; uart.port.serial_out = ingenic_uart_serial_out; uart.port.serial_in = ingenic_uart_serial_in; uart.port.irq = irq->start; uart.port.dev = &pdev->dev; uart.port.fifosize = cdata->fifosize; uart.tx_loadsz = cdata->tx_loadsz; uart.capabilities = UART_CAP_FIFO | UART_CAP_RTOIE; /* Check for a fixed line number */ line = of_alias_get_id(pdev->dev.of_node, "serial"); if (line >= 0) uart.port.line = line; uart.port.membase = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); if (!uart.port.membase) return -ENOMEM; data->clk_module = devm_clk_get(&pdev->dev, "module"); if (IS_ERR(data->clk_module)) { err = PTR_ERR(data->clk_module); if (err != -EPROBE_DEFER) dev_err(&pdev->dev, "unable to get module clock: %d\n", err); return err; } data->clk_baud = devm_clk_get(&pdev->dev, "baud"); if (IS_ERR(data->clk_baud)) { err = PTR_ERR(data->clk_baud); if (err != -EPROBE_DEFER) dev_err(&pdev->dev, "unable to get baud clock: %d\n", err); return err; } err = clk_prepare_enable(data->clk_module); if (err) { dev_err(&pdev->dev, "could not enable module clock: %d\n", err); goto out; } err = 
clk_prepare_enable(data->clk_baud); if (err) { dev_err(&pdev->dev, "could not enable baud clock: %d\n", err); goto out_disable_moduleclk; } uart.port.uartclk = clk_get_rate(data->clk_baud); data->line = serial8250_register_8250_port(&uart); if (data->line < 0) { err = data->line; goto out_disable_baudclk; } platform_set_drvdata(pdev, data); return 0; out_disable_baudclk: clk_disable_unprepare(data->clk_baud); out_disable_moduleclk: clk_disable_unprepare(data->clk_module); out: return err; }
/*
 * spi_qup_probe() - bind a Qualcomm QUP SPI controller.
 *
 * Maps the registers, enables the core and iface clocks, allocates and
 * configures the SPI master (optionally with DMA), reads the block/FIFO
 * geometry out of QUP_IO_M_MODES, resets the QUP state machine, programs
 * the error masks, installs the IRQ handler and registers the master with
 * runtime PM enabled.
 */
static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode, num_cs;
	int ret, irq, size;

	dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	/* This is optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		return ret;
	}

	master = spi_alloc_master(dev, sizeof(struct spi_qup));
	if (!master) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	/* use num-cs unless not present or out of range */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		master->num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		master->num_chipselect = num_cs;

	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->max_speed_hz = max_freq;
	master->transfer_one = spi_qup_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;
	master->dma_alignment = dma_get_cache_alignment();
	master->max_dma_len = SPI_MAX_DMA_XFER;

	platform_set_drvdata(pdev, master);

	controller = spi_qup_get_devdata(master);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->irq = irq;

	/* DMA is optional; only a deferral aborts the probe */
	ret = spi_qup_init_dma(master, res->start);
	if (ret == -EPROBE_DEFER)
		goto error;
	else if (!ret)
		master->can_dma = spi_qup_can_dma;

	/* set v1 flag if device is version 1 */
	if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1"))
		controller->qup_v1 = 1;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

	/* block and FIFO sizes are encoded in the IO modes register */
	iomode = readl_relaxed(base + QUP_IO_M_MODES);

	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	/* soft-reset the QUP and park it in the RESET state */
	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error_dma;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);

	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* if earlier version of the QUP, disable INPUT_OVERRUN */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
			base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error_dma;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error_dma:
	spi_qup_release_dma(master);
error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	spi_master_put(master);
	return ret;
}
/*
 * sdhci_pxav3_probe - probe a PXA v3 SDHCI controller.
 *
 * Allocates per-host private data, initialises the platform SDHCI host,
 * applies Armada-380 quirks when matching, enables the IO clock, merges
 * capabilities from DT or legacy platform data, sets up runtime PM and
 * finally registers the host.  Returns 0 on success or a negative errno.
 *
 * Error unwinding relies on fall-through labels: err_add_host undoes the
 * runtime-PM setup, err_of_parse/err_cd_req disable the clock, and
 * err_clk_get/err_mbus_win only free the platform host.
 */
static int sdhci_pxav3_probe(struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
	struct device *dev = &pdev->dev;
	struct device_node *np = pdev->dev.of_node;
	struct sdhci_host *host = NULL;
	struct sdhci_pxa *pxa = NULL;
	const struct of_device_id *match;
	int ret;
	struct clk *clk;

	/* devm allocation: freed automatically on probe failure/removal */
	pxa = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_pxa), GFP_KERNEL);
	if (!pxa)
		return -ENOMEM;

	host = sdhci_pltfm_init(pdev, &sdhci_pxav3_pdata, 0);
	if (IS_ERR(host))
		return PTR_ERR(host);

	/* enable 1/8V DDR capable */
	host->mmc->caps |= MMC_CAP_1_8V_DDR;

	/* Armada 380 integration needs extra quirks and MBUS window setup */
	if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
		ret = armada_38x_quirks(pdev, host);
		if (ret < 0)
			goto err_clk_get;
		ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
		if (ret < 0)
			goto err_mbus_win;
	}

	pltfm_host = sdhci_priv(host);
	pltfm_host->priv = pxa;

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get io clock\n");
		ret = PTR_ERR(clk);
		goto err_clk_get;
	}
	pltfm_host->clk = clk;
	/* NOTE(review): clk_prepare_enable() return value is ignored here */
	clk_prepare_enable(clk);

	match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev);
	if (match) {
		/* DT path: parse generic MMC bindings, then SoC pdata */
		ret = mmc_of_parse(host->mmc);
		if (ret)
			goto err_of_parse;
		sdhci_get_of_property(pdev);
		pdata = pxav3_get_mmc_pdata(dev);
	} else if (pdata) {
		/* legacy platform-data path */
		/* on-chip device */
		if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
			host->mmc->caps |= MMC_CAP_NONREMOVABLE;

		/* If slot design supports 8 bit data, indicate this to MMC. */
		if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
			host->mmc->caps |= MMC_CAP_8_BIT_DATA;

		if (pdata->quirks)
			host->quirks |= pdata->quirks;
		if (pdata->quirks2)
			host->quirks2 |= pdata->quirks2;
		if (pdata->host_caps)
			host->mmc->caps |= pdata->host_caps;
		if (pdata->host_caps2)
			host->mmc->caps2 |= pdata->host_caps2;
		if (pdata->pm_caps)
			host->mmc->pm_caps |= pdata->pm_caps;

		/* optional out-of-band card-detect GPIO */
		if (gpio_is_valid(pdata->ext_cd_gpio)) {
			ret = mmc_gpio_request_cd(host->mmc,
						  pdata->ext_cd_gpio, 0);
			if (ret) {
				dev_err(mmc_dev(host->mmc),
					"failed to allocate card detect gpio\n");
				goto err_cd_req;
			}
		}
	}

	/*
	 * Mark the device active and hold a usage reference across
	 * sdhci_add_host(); the reference is dropped with
	 * pm_runtime_put_autosuspend() once registration succeeds.
	 */
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_suspend_ignore_children(&pdev->dev, 1);

	ret = sdhci_add_host(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to add host\n");
		goto err_add_host;
	}

	platform_set_drvdata(pdev, host);

	/* keep power across suspend only when the card needs it (SDIO) */
	if (host->mmc->pm_caps & MMC_PM_KEEP_POWER) {
		device_init_wakeup(&pdev->dev, 1);
		host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ;
	} else {
		device_init_wakeup(&pdev->dev, 0);
	}

	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

err_add_host:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
err_of_parse:
err_cd_req:
	clk_disable_unprepare(clk);
err_clk_get:
err_mbus_win:
	sdhci_pltfm_free(pdev);
	return ret;
}
static int msm8660_startup(struct snd_pcm_substream *substream) { int ret = 0; pr_info("[%s:%s]\n", __MM_FILE__, __func__); // if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { rx_osr_clk = clk_get(NULL, "i2s_spkr_osr_clk"); if (IS_ERR(rx_osr_clk)) { pr_debug("Failed to get i2s_spkr_osr_clk\n"); return PTR_ERR(rx_osr_clk); } /* Master clock OSR 256 */ /* Initially set to Lowest sample rate Needed */ clk_set_rate(rx_osr_clk, 8000 * 256); ret = clk_prepare_enable(rx_osr_clk); if (ret != 0) { pr_debug("Unable to enable i2s_spkr_osr_clk\n"); clk_put(rx_osr_clk); return ret; } rx_bit_clk = clk_get(NULL, "i2s_spkr_bit_clk"); if (IS_ERR(rx_bit_clk)) { pr_debug("Failed to get i2s_spkr_bit_clk\n"); clk_disable_unprepare(rx_osr_clk); clk_put(rx_osr_clk); return PTR_ERR(rx_bit_clk); } clk_set_rate(rx_bit_clk, 8); ret = clk_prepare_enable(rx_bit_clk); if (ret != 0) { pr_debug("Unable to enable i2s_spkr_bit_clk\n"); clk_put(rx_bit_clk); clk_disable_unprepare(rx_osr_clk); clk_put(rx_osr_clk); return ret; } timpani_poweramp_on(); msleep(30); /* End of platform specific logic */ } else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { tx_osr_clk = clk_get(NULL, "i2s_mic_osr_clk"); if (IS_ERR(tx_osr_clk)) { pr_debug("Failed to get i2s_mic_osr_clk\n"); return PTR_ERR(tx_osr_clk); } /* Master clock OSR 256 */ clk_set_rate(tx_osr_clk, 8000 * 256); ret = clk_prepare_enable(tx_osr_clk); if (ret != 0) { pr_debug("Unable to enable i2s_mic_osr_clk\n"); clk_put(tx_osr_clk); return ret; } tx_bit_clk = clk_get(NULL, "i2s_mic_bit_clk"); if (IS_ERR(tx_bit_clk)) { pr_debug("Failed to get i2s_mic_bit_clk\n"); clk_disable_unprepare(tx_osr_clk); clk_put(tx_osr_clk); return PTR_ERR(tx_bit_clk); } clk_set_rate(tx_bit_clk, 8); ret = clk_prepare_enable(tx_bit_clk); if (ret != 0) { pr_debug("Unable to enable i2s_mic_bit_clk\n"); clk_put(tx_bit_clk); clk_disable_unprepare(tx_osr_clk); clk_put(tx_osr_clk); return ret; } msm_snddev_enable_dmic_power(); msleep(30); } return ret; }
static int snddev_mi2s_open(struct msm_snddev_info *dev_info) { int rc = 0; union afe_port_config afe_config; u8 channels; u8 num_of_sd_lines = 0; struct snddev_mi2s_drv_state *drv = &snddev_mi2s_drv; struct snddev_mi2s_data *snddev_mi2s_data = dev_info->private_data; if (!dev_info) { pr_err("%s: msm_snddev_info is null\n", __func__); return -EINVAL; } /* set up osr clk */ drv->tx_osrclk = clk_get_sys(NULL, "mi2s_osr_clk"); if (IS_ERR(drv->tx_osrclk)) pr_err("%s master clock Error\n", __func__); rc = clk_set_rate(drv->tx_osrclk, SNDDEV_MI2S_CLK_RATE(dev_info->sample_rate)); if (IS_ERR_VALUE(rc)) { pr_err("ERROR setting osr clock\n"); return -ENODEV; } clk_prepare_enable(drv->tx_osrclk); /* set up bit clk */ drv->tx_bitclk = clk_get_sys(NULL, "mi2s_bit_clk"); if (IS_ERR(drv->tx_bitclk)) pr_err("%s clock Error\n", __func__); rc = clk_set_rate(drv->tx_bitclk, 8); if (IS_ERR_VALUE(rc)) { pr_err("ERROR setting bit clock\n"); clk_disable_unprepare(drv->tx_osrclk); return -ENODEV; } clk_prepare_enable(drv->tx_bitclk); afe_config.mi2s.bitwidth = 16; if (snddev_mi2s_data->channel_mode == 1) channels = AFE_MI2S_MONO; else if (snddev_mi2s_data->channel_mode == 2) channels = AFE_MI2S_STEREO; else if (snddev_mi2s_data->channel_mode == 4) channels = AFE_MI2S_4CHANNELS; else if (snddev_mi2s_data->channel_mode == 6) channels = AFE_MI2S_6CHANNELS; else if (snddev_mi2s_data->channel_mode == 8) channels = AFE_MI2S_8CHANNELS; else { pr_err("ERROR: Invalid MI2S channel mode\n"); goto error_invalid_data; } num_of_sd_lines = num_of_bits_set(snddev_mi2s_data->sd_lines); switch (num_of_sd_lines) { case 1: switch (snddev_mi2s_data->sd_lines) { case MI2S_SD0: afe_config.mi2s.line = AFE_I2S_SD0; break; case MI2S_SD1: afe_config.mi2s.line = AFE_I2S_SD1; break; case MI2S_SD2: afe_config.mi2s.line = AFE_I2S_SD2; break; case MI2S_SD3: afe_config.mi2s.line = AFE_I2S_SD3; break; default: pr_err("%s: invalid SD line\n", __func__); goto error_invalid_data; } if (channels != AFE_MI2S_STEREO && 
channels != AFE_MI2S_MONO) { pr_err("%s: for one SD line, channel " "must be 1 or 2\n", __func__); goto error_invalid_data; } afe_config.mi2s.channel = channels; break; case 2: switch (snddev_mi2s_data->sd_lines) { case MI2S_SD0 | MI2S_SD1: afe_config.mi2s.line = AFE_I2S_QUAD01; break; case MI2S_SD2 | MI2S_SD3: afe_config.mi2s.line = AFE_I2S_QUAD23; break; default: pr_err("%s: invalid SD line\n", __func__); goto error_invalid_data; } if (channels != AFE_MI2S_4CHANNELS) { pr_err("%s: for two SD lines, channel " "must be 1 and 2 or 3 and 4\n", __func__); goto error_invalid_data; } break; case 3: switch (snddev_mi2s_data->sd_lines) { case MI2S_SD0 | MI2S_SD1 | MI2S_SD2: afe_config.mi2s.line = AFE_I2S_6CHS; break; default: pr_err("%s: invalid SD lines\n", __func__); goto error_invalid_data; } if (channels != AFE_MI2S_6CHANNELS) { pr_err("%s: for three SD lines, lines " "must be 1, 2, and 3\n", __func__); goto error_invalid_data; } break; case 4: switch (snddev_mi2s_data->sd_lines) { case MI2S_SD0 | MI2S_SD1 | MI2S_SD2 | MI2S_SD3: afe_config.mi2s.line = AFE_I2S_8CHS; break; default: pr_err("%s: invalid SD lines\n", __func__); goto error_invalid_data; } if (channels != AFE_MI2S_8CHANNELS) { pr_err("%s: for four SD lines, lines " "must be 1, 2, 3, and 4\n", __func__); goto error_invalid_data; } break; default: pr_err("%s: invalid SD lines\n", __func__); goto error_invalid_data; } afe_config.mi2s.ws = 1; afe_config.mi2s.format = MSM_AFE_I2S_FORMAT_LPCM; rc = afe_open(snddev_mi2s_data->copp_id, &afe_config, dev_info->sample_rate); if (rc < 0) { pr_err("%s: afe_open failed\n", __func__); goto error_invalid_data; } /*enable fm gpio here*/ rc = mi2s_gpios_request(); if (rc < 0) { pr_err("%s: GPIO request failed\n", __func__); return rc; } pr_info("%s: afe_open done\n", __func__); return rc; error_invalid_data: clk_disable_unprepare(drv->tx_bitclk); clk_disable_unprepare(drv->tx_osrclk); return -EINVAL; }
/*
 * sdhci_pxav2_probe - probe a PXA v2 SDHCI controller.
 *
 * Allocates private data, initialises the platform SDHCI host, enables
 * the "PXA-SDHCLK" IO clock, applies quirks/capabilities from DT-derived
 * or legacy platform data and registers the host.  Returns 0 on success
 * or a negative errno.  Note that, unlike the v3 probe, pxa is allocated
 * with plain kzalloc() and must be freed on every error path.
 */
static int sdhci_pxav2_probe(struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
	struct device *dev = &pdev->dev;
	struct sdhci_host *host = NULL;
	struct sdhci_pxa *pxa = NULL;
	const struct of_device_id *match;
	int ret;
	struct clk *clk;

	pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL);
	if (!pxa)
		return -ENOMEM;

	host = sdhci_pltfm_init(pdev, NULL, 0);
	if (IS_ERR(host)) {
		kfree(pxa);
		return PTR_ERR(host);
	}
	pltfm_host = sdhci_priv(host);
	pltfm_host->priv = pxa;

	clk = clk_get(dev, "PXA-SDHCLK");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get io clock\n");
		ret = PTR_ERR(clk);
		goto err_clk_get;
	}
	pltfm_host->clk = clk;
	/* NOTE(review): clk_prepare_enable() return value is ignored here */
	clk_prepare_enable(clk);

	/* controller-level quirks common to all PXA v2 integrations */
	host->quirks = SDHCI_QUIRK_BROKEN_ADMA
		| SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
		| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;

	match = of_match_device(of_match_ptr(sdhci_pxav2_of_match), &pdev->dev);
	if (match) {
		/* DT path: synthesize pdata from device-tree properties */
		pdata = pxav2_get_mmc_pdata(dev);
	}
	if (pdata) {
		if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
			/* on-chip device */
			host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
			host->mmc->caps |= MMC_CAP_NONREMOVABLE;
		}

		/* If slot design supports 8 bit data, indicate this to MMC. */
		if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
			host->mmc->caps |= MMC_CAP_8_BIT_DATA;

		if (pdata->quirks)
			host->quirks |= pdata->quirks;
		if (pdata->host_caps)
			host->mmc->caps |= pdata->host_caps;
		if (pdata->pm_caps)
			host->mmc->pm_caps |= pdata->pm_caps;
	}

	host->ops = &pxav2_sdhci_ops;

	ret = sdhci_add_host(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to add host\n");
		goto err_add_host;
	}

	platform_set_drvdata(pdev, host);

	return 0;

err_add_host:
	clk_disable_unprepare(clk);
	clk_put(clk);
err_clk_get:
	sdhci_pltfm_free(pdev);
	kfree(pxa);
	return ret;
}
if (unlikely(ret)) goto error; return 0; error: pr_err("%s(%p): module reset timeout\n", __func__, reset_addr); return -ETIMEDOUT; } int gpmi_init(struct gpmi_nand_data *this) { struct resources *r = &this->resources; int ret; ret = clk_prepare_enable(r->clock); if (ret) goto err_out; ret = gpmi_reset_block(r->gpmi_regs, false); if (ret) goto err_out; /* Choose NAND mode. */ writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR); /* Set the IRQ polarity. */ writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY, r->gpmi_regs + HW_GPMI_CTRL1_SET); /* Disable Write-Protection. */ writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate, int mclk) { int new_baseclock; bool clk_change; int err; switch (srate) { case 11025: case 22050: case 44100: case 88200: if (data->soc == TEGRA_ASOC_UTILS_SOC_TEGRA20) new_baseclock = 56448000; else new_baseclock = 564480000; break; case 8000: case 16000: case 32000: case 48000: case 64000: case 96000: if (data->soc == TEGRA_ASOC_UTILS_SOC_TEGRA20) new_baseclock = 73728000; else new_baseclock = 552960000; break; default: return -EINVAL; } clk_change = ((new_baseclock != data->set_baseclock) || (mclk != data->set_mclk)); if (!clk_change) return 0; data->set_baseclock = 0; data->set_mclk = 0; clk_disable_unprepare(data->clk_cdev1); clk_disable_unprepare(data->clk_pll_a_out0); clk_disable_unprepare(data->clk_pll_a); err = clk_set_rate(data->clk_pll_a, new_baseclock); if (err) { dev_err(data->dev, "Can't set pll_a rate: %d\n", err); return err; } err = clk_set_rate(data->clk_pll_a_out0, mclk); if (err) { dev_err(data->dev, "Can't set pll_a_out0 rate: %d\n", err); return err; } /* Don't set cdev1/extern1 rate; it's locked to pll_a_out0 */ err = clk_prepare_enable(data->clk_pll_a); if (err) { dev_err(data->dev, "Can't enable pll_a: %d\n", err); return err; } err = clk_prepare_enable(data->clk_pll_a_out0); if (err) { dev_err(data->dev, "Can't enable pll_a_out0: %d\n", err); return err; } err = clk_prepare_enable(data->clk_cdev1); if (err) { dev_err(data->dev, "Can't enable cdev1: %d\n", err); return err; } data->set_baseclock = new_baseclock; data->set_mclk = mclk; return 0; }
static int dc_i2c_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct dc_i2c *i2c; struct resource *r; int ret = 0, irq; i2c = devm_kzalloc(&pdev->dev, sizeof(struct dc_i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", &i2c->frequency)) i2c->frequency = DEFAULT_FREQ; i2c->dev = &pdev->dev; platform_set_drvdata(pdev, i2c); spin_lock_init(&i2c->lock); init_completion(&i2c->done); i2c->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(i2c->clk)) return PTR_ERR(i2c->clk); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); i2c->regs = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(i2c->regs)) return PTR_ERR(i2c->regs); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_irq(&pdev->dev, irq, dc_i2c_irq, 0, dev_name(&pdev->dev), i2c); if (ret < 0) return ret; strlcpy(i2c->adap.name, "Conexant Digicolor I2C adapter", sizeof(i2c->adap.name)); i2c->adap.owner = THIS_MODULE; i2c->adap.algo = &dc_i2c_algorithm; i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = np; i2c->adap.algo_data = i2c; ret = dc_i2c_init_hw(i2c); if (ret) return ret; ret = clk_prepare_enable(i2c->clk); if (ret < 0) return ret; ret = i2c_add_adapter(&i2c->adap); if (ret < 0) { clk_disable_unprepare(i2c->clk); return ret; } return 0; }
static long pn547_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct pn547_dev *pn547_dev = filp->private_data; switch (cmd) { case PN547_SET_PWR: if (arg == 2) { /* power on with firmware download (requires hw reset) */ gpio_set_value_cansleep(pn547_dev->ven_gpio, 1); gpio_set_value(pn547_dev->firm_gpio, 1); usleep_range(10000, 10050); gpio_set_value_cansleep(pn547_dev->ven_gpio, 0); usleep_range(10000, 10050); gpio_set_value_cansleep(pn547_dev->ven_gpio, 1); usleep_range(10000, 10050); if (atomic_read(&pn547_dev->irq_enabled) == 0) { atomic_set(&pn547_dev->irq_enabled, 1); enable_irq(pn547_dev->client->irq); enable_irq_wake(pn547_dev->client->irq); } pr_info("%s power on with firmware, irq=%d\n", __func__, atomic_read(&pn547_dev->irq_enabled)); #ifdef CONFIG_NFC_PN547_8916_CLK_CTL pn547_dev->nfc_enable = 1; if (IS_ERR(pn547_dev->nfc_clock)) { pr_err("[NFC] %s: Couldn't get D1)\n", __func__); } else { //clk_put(pn547_dev->nfc_clock); pr_info("%s power set and clk_prepare_enable\n", __func__); if (clk_prepare_enable(pn547_dev->nfc_clock)) pr_err("[NFC] %s: Couldn't prepare D1\n", __func__); } #endif } else if (arg == 1) { /* power on */ if (pn547_dev->conf_gpio) pn547_dev->conf_gpio(); gpio_set_value(pn547_dev->firm_gpio, 0); gpio_set_value_cansleep(pn547_dev->ven_gpio, 1); usleep_range(10000, 10050); if (atomic_read(&pn547_dev->irq_enabled) == 0) { atomic_set(&pn547_dev->irq_enabled, 1); enable_irq(pn547_dev->client->irq); enable_irq_wake(pn547_dev->client->irq); } pr_info("%s power on, irq=%d\n", __func__, atomic_read(&pn547_dev->irq_enabled)); #ifdef CONFIG_NFC_PN547_8916_CLK_CTL pn547_dev->nfc_enable = 1; if (IS_ERR(pn547_dev->nfc_clock)) { pr_err("[NFC] %s: Couldn't get D1)\n", __func__); } else { //clk_put(pn547_dev->nfc_clock); pr_info("%s power on and clk_prepare_enable\n", __func__); if (clk_prepare_enable(pn547_dev->nfc_clock)) pr_err("[NFC] %s: Couldn't prepare D1\n", __func__); } #endif } else if (arg == 0) { /* power off */ if 
(atomic_read(&pn547_dev->irq_enabled) == 1) { atomic_set(&pn547_dev->irq_enabled, 0); disable_irq_wake(pn547_dev->client->irq); disable_irq_nosync(pn547_dev->client->irq); } pr_info("%s power off, irq=%d\n", __func__, atomic_read(&pn547_dev->irq_enabled)); gpio_set_value(pn547_dev->firm_gpio, 0); gpio_set_value_cansleep(pn547_dev->ven_gpio, 0); usleep_range(10000, 10050); #ifdef CONFIG_NFC_PN547_8916_CLK_CTL pn547_dev->nfc_enable = 0; if(pn547_dev->nfc_clock) { clk_disable_unprepare(pn547_dev->nfc_clock); pr_info("%s power off and clk_disable_unprepare\n", __func__); } #endif } else if (arg == 3) { pr_info("%s Read Cancel\n", __func__); pn547_dev->cancel_read = true; atomic_set(&pn547_dev->read_flag, 1); wake_up(&pn547_dev->read_wq); } else { pr_err("%s bad arg %lu\n", __func__, arg); return -EINVAL; } break; default: pr_err("%s bad ioctl %u\n", __func__, cmd); return -EINVAL; } return 0; }
/*****************************************************************************
 * UART
 ****************************************************************************/
/*
 * uart_get_clk_rate - enable a UART clock and report its rate.
 *
 * The clock is left prepared/enabled so the rate read-back reflects the
 * running configuration.  Boot-time only (__init).
 */
static unsigned long __init uart_get_clk_rate(struct clk *clk)
{
	unsigned long rate;

	clk_prepare_enable(clk);
	rate = clk_get_rate(clk);

	return rate;
}
/*
 * s5p_mfc_set_clock_parent - route the MFC clock mux for the current SoC.
 *
 * Looks up the SoC-specific mux (child) and source (parent) clocks and
 * reparents the mux.  Returns 0 on success or a negative errno when a
 * clock lookup fails.  All clk_get() references are dropped before
 * returning; the parent/child relationship persists after clk_put().
 */
int s5p_mfc_set_clock_parent(struct s5p_mfc_dev *dev)
{
	struct clk *clk_child = NULL;
	struct clk *clk_parent = NULL;

#if defined(CONFIG_SOC_EXYNOS5430)
	if (dev->id == 0) {
		clk_child = clk_get(dev->device, "mout_aclk_mfc0_333_user");
		if (IS_ERR(clk_child)) {
			/* NOTE(review): __clk_get_name() on an ERR_PTR —
			 * pre-existing pattern kept for message parity */
			pr_err("failed to get %s clock\n",
				__clk_get_name(clk_child));
			return PTR_ERR(clk_child);
		}

		clk_parent = clk_get(dev->device, "aclk_mfc0_333");
		if (IS_ERR(clk_parent)) {
			/* FIX: release the child reference on this path */
			clk_put(clk_child);
			pr_err("failed to get %s clock\n",
				__clk_get_name(clk_parent));
			return PTR_ERR(clk_parent);
		}
		clk_set_parent(clk_child, clk_parent);
		/* FIX: drop both references (they used to leak here) */
		clk_put(clk_child);
		clk_put(clk_parent);
	} else if (dev->id == 1) {
		clk_child = clk_get(dev->device, "mout_aclk_mfc1_333_user");
		if (IS_ERR(clk_child)) {
			pr_err("failed to get %s clock\n",
				__clk_get_name(clk_child));
			return PTR_ERR(clk_child);
		}

		clk_parent = clk_get(dev->device, "aclk_mfc1_333");
		if (IS_ERR(clk_parent)) {
			/* FIX: release the child reference on this path */
			clk_put(clk_child);
			pr_err("failed to get %s clock\n",
				__clk_get_name(clk_parent));
			return PTR_ERR(clk_parent);
		}
		clk_set_parent(clk_child, clk_parent);
		/* FIX: drop both references (they used to leak here) */
		clk_put(clk_child);
		clk_put(clk_parent);
	}
#elif defined(CONFIG_SOC_EXYNOS5422)
	clk_child = clk_get(dev->device, "mout_aclk_333_user");
	if (IS_ERR(clk_child)) {
		pr_err("failed to get %s clock\n", __clk_get_name(clk_child));
		return PTR_ERR(clk_child);
	}

	clk_parent = clk_get(dev->device, "mout_aclk_333_sw");
	if (IS_ERR(clk_parent)) {
		/* FIX: release the child reference on this path */
		clk_put(clk_child);
		pr_err("failed to get %s clock\n", __clk_get_name(clk_parent));
		return PTR_ERR(clk_parent);
	}
	clk_set_parent(clk_child, clk_parent);
	/* FIX: drop both references (they used to leak here) */
	clk_put(clk_child);
	clk_put(clk_parent);
#elif defined(CONFIG_SOC_EXYNOS5433)
	clk_child = clk_get(dev->device, "mout_aclk_mfc_400_user");
	if (IS_ERR(clk_child)) {
		pr_err("failed to get %s clock\n", __clk_get_name(clk_child));
		return PTR_ERR(clk_child);
	}

	clk_parent = clk_get(dev->device, "aclk_mfc_400");
	if (IS_ERR(clk_parent)) {
		clk_put(clk_child);
		pr_err("failed to get %s clock\n", __clk_get_name(clk_parent));
		return PTR_ERR(clk_parent);
	}

	/* before set mux register, all source clock have to enabled */
	clk_prepare_enable(clk_parent);

	if (clk_set_parent(clk_child, clk_parent)) {
		pr_err("Unable to set parent %s of clock %s \n",
			__clk_get_name(clk_parent), __clk_get_name(clk_child));
	}

	/* expected mfc related ref clock value be set above 1 */
	clk_put(clk_child);
	clk_put(clk_parent);
#endif

	return 0;
}
/*
 * mxhci_hsic_init_clocks - acquire+enable, or disable, the HSIC clock set.
 *
 * @init: non-zero to get and enable all clocks; zero to tear them down.
 *
 * Returns 0 on success or a negative errno.
 *
 * The teardown path (init == 0) deliberately jumps into the middle of the
 * error-unwind label chain: disable_all_clks disables phy_sleep_clk, then
 * (unless in LPM, where only phy_sleep_clk is running) falls through the
 * err_* labels to disable the remaining clocks in reverse enable order.
 * Keep the label ordering intact when modifying this function.
 */
static int mxhci_hsic_init_clocks(struct mxhci_hsic_hcd *mxhci, u32 init)
{
	int ret = 0;

	if (!init)
		goto disable_all_clks;

	/* 75Mhz system_clk required for normal hsic operation */
	mxhci->system_clk = devm_clk_get(mxhci->dev, "system_clk");
	if (IS_ERR(mxhci->system_clk)) {
		dev_err(mxhci->dev, "failed to get system_clk\n");
		ret = PTR_ERR(mxhci->system_clk);
		goto out;
	}
	clk_set_rate(mxhci->system_clk, 75000000);

	/* 60Mhz core_clk required for LINK protocol engine */
	mxhci->core_clk = devm_clk_get(mxhci->dev, "core_clk");
	if (IS_ERR(mxhci->core_clk)) {
		dev_err(mxhci->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mxhci->core_clk);
		goto out;
	}
	clk_set_rate(mxhci->core_clk, 60000000);

	/* 480Mhz main HSIC phy clk */
	mxhci->hsic_clk = devm_clk_get(mxhci->dev, "hsic_clk");
	if (IS_ERR(mxhci->hsic_clk)) {
		dev_err(mxhci->dev, "failed to get hsic_clk\n");
		ret = PTR_ERR(mxhci->hsic_clk);
		goto out;
	}
	clk_set_rate(mxhci->hsic_clk, 480000000);

	/* 19.2Mhz utmi_clk ref_clk required to shut off HSIC PLL */
	mxhci->utmi_clk = devm_clk_get(mxhci->dev, "utmi_clk");
	if (IS_ERR(mxhci->utmi_clk)) {
		dev_err(mxhci->dev, "failed to get utmi_clk\n");
		ret = PTR_ERR(mxhci->utmi_clk);
		goto out;
	}
	clk_set_rate(mxhci->utmi_clk, 19200000);

	/* 32Khz phy sleep clk */
	mxhci->phy_sleep_clk = devm_clk_get(mxhci->dev, "phy_sleep_clk");
	if (IS_ERR(mxhci->phy_sleep_clk)) {
		dev_err(mxhci->dev, "failed to get phy_sleep_clk\n");
		ret = PTR_ERR(mxhci->phy_sleep_clk);
		goto out;
	}
	clk_set_rate(mxhci->phy_sleep_clk, 32000);

	/* 10MHz cal_clk required for calibration of I/O pads */
	/* NOTE(review): comment says 10 MHz but the rate set is 9.6 MHz —
	 * confirm which is intended */
	mxhci->cal_clk = devm_clk_get(mxhci->dev, "cal_clk");
	if (IS_ERR(mxhci->cal_clk)) {
		dev_err(mxhci->dev, "failed to get cal_clk\n");
		ret = PTR_ERR(mxhci->cal_clk);
		goto out;
	}
	clk_set_rate(mxhci->cal_clk, 9600000);

	ret = clk_prepare_enable(mxhci->system_clk);
	if (ret) {
		dev_err(mxhci->dev, "failed to enable system_clk\n");
		goto out;
	}

	/* enable force-on mode for periph_on */
	clk_set_flags(mxhci->system_clk, CLKFLAG_RETAIN_PERIPH);

	ret = clk_prepare_enable(mxhci->core_clk);
	if (ret) {
		dev_err(mxhci->dev, "failed to enable core_clk\n");
		goto err_core_clk;
	}

	ret = clk_prepare_enable(mxhci->hsic_clk);
	if (ret) {
		dev_err(mxhci->dev, "failed to enable hsic_clk\n");
		goto err_hsic_clk;
	}

	ret = clk_prepare_enable(mxhci->utmi_clk);
	if (ret) {
		dev_err(mxhci->dev, "failed to enable utmi_clk\n");
		goto err_utmi_clk;
	}

	ret = clk_prepare_enable(mxhci->cal_clk);
	if (ret) {
		dev_err(mxhci->dev, "failed to enable cal_clk\n");
		goto err_cal_clk;
	}

	ret = clk_prepare_enable(mxhci->phy_sleep_clk);
	if (ret) {
		dev_err(mxhci->dev, "failed to enable phy_sleep_clk\n");
		goto err_phy_sleep_clk;
	}

	return 0;

disable_all_clks:
	/* teardown entry point: falls through the error labels below */
	clk_disable_unprepare(mxhci->phy_sleep_clk);
	/* in low-power mode only phy_sleep_clk was left running */
	if (mxhci->in_lpm)
		goto out;
err_phy_sleep_clk:
	clk_disable_unprepare(mxhci->cal_clk);
err_cal_clk:
	clk_disable_unprepare(mxhci->utmi_clk);
err_utmi_clk:
	clk_disable_unprepare(mxhci->hsic_clk);
err_hsic_clk:
	clk_disable_unprepare(mxhci->core_clk);
err_core_clk:
	clk_disable_unprepare(mxhci->system_clk);
out:
	return ret;
}
static int bcm2079x_probe(struct i2c_client *client, const struct i2c_device_id *id) { int ret; struct bcm2079x_dev *bcm2079x_dev; struct bcm2079x_platform_data *platform_data; /*platform_data = client->dev.platform_data; dev_info(&client->dev, "%s, probing bcm2079x driver flags = %x\n", __func__, client->flags); if (platform_data == NULL) { dev_err(&client->dev, "nfc probe fail\n"); return -ENODEV; }*/ if (client ->dev.of_node) { platform_data = devm_kzalloc(&client ->dev,sizeof(struct bcm2079x_platform_data), GFP_KERNEL); if (!platform_data) { dev_err(&client ->dev, "Failed to allocate memory \n"); return -ENOMEM; } ret = bcm2079x_parse_dt(&client ->dev, platform_data); if (ret) return ret; } nfc_pinctrl_init(&client->dev); //ret = pinctrl_select_state(bcm2079x_pctrl.pinctrl, // bcm2079x_pctrl.nfc_gpio_state_active); // if (ret) // pr_err("%s:%d cannot set pin to nfc_gpio_state_active state", // __func__, __LINE__); if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(&client->dev, "need I2C_FUNC_I2C\n"); return -ENODEV; } ret = gpio_request(platform_data->irq_gpio, "nfc_int"); if (ret) return -ENODEV; ret = gpio_request(platform_data->en_gpio, "nfc_ven"); if (ret) goto err_en; ret = gpio_request(platform_data->wake_gpio, "nfc_firm"); if (ret) goto err_firm; gpio_direction_output(platform_data->en_gpio, 0 ); gpio_direction_output(platform_data->wake_gpio, 0); gpio_set_value(platform_data->en_gpio, 0); gpio_set_value(platform_data->wake_gpio, 0); gpio_direction_input(platform_data->irq_gpio ); bcm2079x_dev = kzalloc(sizeof(*bcm2079x_dev), GFP_KERNEL); if (bcm2079x_dev == NULL) { dev_err(&client->dev, "failed to allocate memory for module data\n"); ret = -ENOMEM; goto err_exit; } bcm2079x_dev->wake_gpio = platform_data->wake_gpio; bcm2079x_dev->irq_gpio = platform_data->irq_gpio; bcm2079x_dev->en_gpio = platform_data->en_gpio; bcm2079x_dev->client = client; /* init mutex and queues */ init_waitqueue_head(&bcm2079x_dev->read_wq); 
mutex_init(&bcm2079x_dev->read_mutex); spin_lock_init(&bcm2079x_dev->irq_enabled_lock); bcm2079x_dev->bcm2079x_device.minor = MISC_DYNAMIC_MINOR; bcm2079x_dev->bcm2079x_device.name = "bcm2079x"; bcm2079x_dev->bcm2079x_device.fops = &bcm2079x_dev_fops; ret = misc_register(&bcm2079x_dev->bcm2079x_device); if (ret) { dev_err(&client->dev, "misc_register failed\n"); goto err_misc_register; } dev_info(&client->dev, "%s, saving address %d\n", __func__, client->addr); bcm2079x_dev->original_address = client->addr; //enable clk 19.2M printk("[dsc] enable clk 19.2M\n"); nfc_rf_clk = clk_get(&client->dev, "ref_clk"); if (nfc_rf_clk != NULL) { if (clk_prepare_enable(nfc_rf_clk)) pr_err("failed request NFC_CLK.\n"); } else { pr_err("%s:nfc_rf_clk is null\n",__FUNCTION__); } /* request irq. the irq is set whenever the chip has data available * for reading. it is cleared when all data has been read. */ dev_info(&client->dev, "requesting IRQ %d with IRQF_NO_SUSPEND\n", client->irq); bcm2079x_dev->irq_enabled = true; ret = request_irq(client->irq, bcm2079x_dev_irq_handler, IRQF_TRIGGER_RISING|IRQF_NO_SUSPEND, client->name, bcm2079x_dev); if (ret) { dev_err(&client->dev, "request_irq failed\n"); goto err_request_irq_failed; } enable_irq_wake(client->irq); bcm2079x_disable_irq(bcm2079x_dev); i2c_set_clientdata(client, bcm2079x_dev); #ifdef ZTEMT_FOR_NFC_PIN_TEST ret= create_sysfs_interfaces(&client->dev); if (ret < 0) { dev_err(&client->dev, "device drv2605 sysfs register failed\n"); return ret; } #endif dev_info(&client->dev, "%s, probing bcm2079x driver exited successfully\n", __func__); #ifdef USE_WAKE_LOCK wake_lock_init(&bcm2079x_dev->wake_lock , WAKE_LOCK_SUSPEND, "nfcwakelock" ); #endif return 0; err_request_irq_failed: misc_deregister(&bcm2079x_dev->bcm2079x_device); err_misc_register: mutex_destroy(&bcm2079x_dev->read_mutex); kfree(bcm2079x_dev); err_exit: gpio_free(platform_data->wake_gpio); err_firm: gpio_free(platform_data->en_gpio); err_en: 
gpio_free(platform_data->irq_gpio); return ret; }
int pcm512x_probe(struct device *dev, struct regmap *regmap) { struct pcm512x_priv *pcm512x; int i, ret; pcm512x = devm_kzalloc(dev, sizeof(struct pcm512x_priv), GFP_KERNEL); if (!pcm512x) return -ENOMEM; dev_set_drvdata(dev, pcm512x); pcm512x->regmap = regmap; for (i = 0; i < ARRAY_SIZE(pcm512x->supplies); i++) pcm512x->supplies[i].supply = pcm512x_supply_names[i]; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(pcm512x->supplies), pcm512x->supplies); if (ret != 0) { dev_err(dev, "Failed to get supplies: %d\n", ret); return ret; } pcm512x->supply_nb[0].notifier_call = pcm512x_regulator_event_0; pcm512x->supply_nb[1].notifier_call = pcm512x_regulator_event_1; pcm512x->supply_nb[2].notifier_call = pcm512x_regulator_event_2; for (i = 0; i < ARRAY_SIZE(pcm512x->supplies); i++) { ret = regulator_register_notifier(pcm512x->supplies[i].consumer, &pcm512x->supply_nb[i]); if (ret != 0) { dev_err(dev, "Failed to register regulator notifier: %d\n", ret); } } ret = regulator_bulk_enable(ARRAY_SIZE(pcm512x->supplies), pcm512x->supplies); if (ret != 0) { dev_err(dev, "Failed to enable supplies: %d\n", ret); return ret; } /* Reset the device, verifying I/O in the process for I2C */ ret = regmap_write(regmap, PCM512x_RESET, PCM512x_RSTM | PCM512x_RSTR); if (ret != 0) { dev_err(dev, "Failed to reset device: %d\n", ret); goto err; } ret = regmap_write(regmap, PCM512x_RESET, 0); if (ret != 0) { dev_err(dev, "Failed to reset device: %d\n", ret); goto err; } pcm512x->sclk = devm_clk_get(dev, NULL); if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) return -EPROBE_DEFER; if (!IS_ERR(pcm512x->sclk)) { ret = clk_prepare_enable(pcm512x->sclk); if (ret != 0) { dev_err(dev, "Failed to enable SCLK: %d\n", ret); return ret; } } /* Default to standby mode */ ret = regmap_update_bits(pcm512x->regmap, PCM512x_POWER, PCM512x_RQST, PCM512x_RQST); if (ret != 0) { dev_err(dev, "Failed to request standby: %d\n", ret); goto err_clk; } pm_runtime_set_active(dev); pm_runtime_enable(dev); pm_runtime_idle(dev); 
#ifdef CONFIG_OF if (dev->of_node) { const struct device_node *np = dev->of_node; u32 val; if (of_property_read_u32(np, "pll-in", &val) >= 0) { if (val > 6) { dev_err(dev, "Invalid pll-in\n"); ret = -EINVAL; goto err_clk; } pcm512x->pll_in = val; } if (of_property_read_u32(np, "pll-out", &val) >= 0) { if (val > 6) { dev_err(dev, "Invalid pll-out\n"); ret = -EINVAL; goto err_clk; } pcm512x->pll_out = val; } if (!pcm512x->pll_in != !pcm512x->pll_out) { dev_err(dev, "Error: both pll-in and pll-out, or none\n"); ret = -EINVAL; goto err_clk; } if (pcm512x->pll_in && pcm512x->pll_in == pcm512x->pll_out) { dev_err(dev, "Error: pll-in == pll-out\n"); ret = -EINVAL; goto err_clk; } } #endif ret = snd_soc_register_codec(dev, &pcm512x_codec_driver, &pcm512x_dai, 1); if (ret != 0) { dev_err(dev, "Failed to register CODEC: %d\n", ret); goto err_pm; } return 0; err_pm: pm_runtime_disable(dev); err_clk: if (!IS_ERR(pcm512x->sclk)) clk_disable_unprepare(pcm512x->sclk); err: regulator_bulk_disable(ARRAY_SIZE(pcm512x->supplies), pcm512x->supplies); return ret; }
/** * cdns_i2c_probe - Platform registration call * @pdev: Handle to the platform device structure * * This function does all the memory allocation and registration for the i2c * device. User can modify the address mode to 10 bit address mode using the * ioctl call with option I2C_TENBIT. * * Return: 0 on success, negative error otherwise */ static int cdns_i2c_probe(struct platform_device *pdev) { struct resource *r_mem; struct cdns_i2c *id; int ret; const struct of_device_id *match; id = devm_kzalloc(&pdev->dev, sizeof(*id), GFP_KERNEL); if (!id) return -ENOMEM; id->dev = &pdev->dev; platform_set_drvdata(pdev, id); match = of_match_node(cdns_i2c_of_match, pdev->dev.of_node); if (match && match->data) { const struct cdns_platform_data *data = match->data; id->quirks = data->quirks; } r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); id->membase = devm_ioremap_resource(&pdev->dev, r_mem); if (IS_ERR(id->membase)) return PTR_ERR(id->membase); id->irq = platform_get_irq(pdev, 0); id->adap.owner = THIS_MODULE; id->adap.dev.of_node = pdev->dev.of_node; id->adap.algo = &cdns_i2c_algo; id->adap.timeout = CDNS_I2C_TIMEOUT; id->adap.retries = 3; /* Default retry value. 
*/ id->adap.algo_data = id; id->adap.dev.parent = &pdev->dev; init_completion(&id->xfer_done); snprintf(id->adap.name, sizeof(id->adap.name), "Cadence I2C at %08lx", (unsigned long)r_mem->start); id->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(id->clk)) { dev_err(&pdev->dev, "input clock not found.\n"); return PTR_ERR(id->clk); } ret = clk_prepare_enable(id->clk); if (ret) dev_err(&pdev->dev, "Unable to enable clock.\n"); pm_runtime_enable(id->dev); pm_runtime_set_autosuspend_delay(id->dev, CNDS_I2C_PM_TIMEOUT); pm_runtime_use_autosuspend(id->dev); pm_runtime_set_active(id->dev); id->clk_rate_change_nb.notifier_call = cdns_i2c_clk_notifier_cb; if (clk_notifier_register(id->clk, &id->clk_rate_change_nb)) dev_warn(&pdev->dev, "Unable to register clock notifier.\n"); id->input_clk = clk_get_rate(id->clk); ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &id->i2c_clk); if (ret || (id->i2c_clk > CDNS_I2C_SPEED_MAX)) id->i2c_clk = CDNS_I2C_SPEED_DEFAULT; cdns_i2c_writereg(CDNS_I2C_CR_ACK_EN | CDNS_I2C_CR_NEA | CDNS_I2C_CR_MS, CDNS_I2C_CR_OFFSET); ret = cdns_i2c_setclk(id->input_clk, id); if (ret) { dev_err(&pdev->dev, "invalid SCL clock: %u Hz\n", id->i2c_clk); ret = -EINVAL; goto err_clk_dis; } ret = devm_request_irq(&pdev->dev, id->irq, cdns_i2c_isr, 0, DRIVER_NAME, id); if (ret) { dev_err(&pdev->dev, "cannot get irq %d\n", id->irq); goto err_clk_dis; } ret = i2c_add_adapter(&id->adap); if (ret < 0) { dev_err(&pdev->dev, "reg adap failed: %d\n", ret); goto err_clk_dis; } /* * Cadence I2C controller has a bug wherein it generates * invalid read transaction after HW timeout in master receiver mode. * HW timeout is not used by this driver and the interrupt is disabled. * But the feature itself cannot be disabled. Hence maximum value * is written to this register to reduce the chances of error. 
*/ cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq); return 0; err_clk_dis: clk_disable_unprepare(id->clk); pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(&pdev->dev); return ret; }
static int omap_bandgap_probe(struct platform_device *pdev) { struct omap_bandgap *bg_ptr; int clk_rate, ret = 0, i; bg_ptr = omap_bandgap_build(pdev); if (IS_ERR_OR_NULL(bg_ptr)) { dev_err(&pdev->dev, "failed to fetch platform data\n"); return PTR_ERR(bg_ptr); } bg_ptr->dev = &pdev->dev; if (OMAP_BANDGAP_HAS(bg_ptr, TSHUT)) { ret = omap_bandgap_tshut_init(bg_ptr, pdev); if (ret) { dev_err(&pdev->dev, "failed to initialize system tshut IRQ\n"); return ret; } } bg_ptr->fclock = clk_get(NULL, bg_ptr->conf->fclock_name); ret = IS_ERR_OR_NULL(bg_ptr->fclock); if (ret) { dev_err(&pdev->dev, "failed to request fclock reference\n"); goto free_irqs; } bg_ptr->div_clk = clk_get(NULL, bg_ptr->conf->div_ck_name); ret = IS_ERR_OR_NULL(bg_ptr->div_clk); if (ret) { dev_err(&pdev->dev, "failed to request div_ts_ck clock ref\n"); goto free_irqs; } for (i = 0; i < bg_ptr->conf->sensor_count; i++) { struct temp_sensor_registers *tsr; u32 val; tsr = bg_ptr->conf->sensors[i].registers; /* * check if the efuse has a non-zero value if not * it is an untrimmed sample and the temperatures * may not be accurate */ val = omap_bandgap_readl(bg_ptr, tsr->bgap_efuse); if (ret || !val) dev_info(&pdev->dev, "Non-trimmed BGAP, Temp not accurate\n"); } clk_rate = clk_round_rate(bg_ptr->div_clk, bg_ptr->conf->sensors[0].ts_data->max_freq); if (clk_rate < bg_ptr->conf->sensors[0].ts_data->min_freq || clk_rate == 0xffffffff) { ret = -ENODEV; dev_err(&pdev->dev, "wrong clock rate (%d)\n", clk_rate); goto put_clks; } ret = clk_set_rate(bg_ptr->div_clk, clk_rate); if (ret) dev_err(&pdev->dev, "Cannot re-set clock rate. 
Continuing\n"); bg_ptr->clk_rate = clk_rate; if (OMAP_BANDGAP_HAS(bg_ptr, CLK_CTRL)) clk_prepare_enable(bg_ptr->fclock); mutex_init(&bg_ptr->bg_mutex); bg_ptr->dev = &pdev->dev; platform_set_drvdata(pdev, bg_ptr); omap_bandgap_power(bg_ptr, true); /* Set default counter to 1 for now */ if (OMAP_BANDGAP_HAS(bg_ptr, COUNTER)) for (i = 0; i < bg_ptr->conf->sensor_count; i++) configure_temp_sensor_counter(bg_ptr, i, 1); for (i = 0; i < bg_ptr->conf->sensor_count; i++) { struct temp_sensor_data *ts_data; ts_data = bg_ptr->conf->sensors[i].ts_data; if (OMAP_BANDGAP_HAS(bg_ptr, TALERT)) temp_sensor_init_talert_thresholds(bg_ptr, i, ts_data->t_hot, ts_data->t_cold); if (OMAP_BANDGAP_HAS(bg_ptr, TSHUT_CONFIG)) { temp_sensor_configure_tshut_hot(bg_ptr, i, ts_data->tshut_hot); temp_sensor_configure_tshut_cold(bg_ptr, i, ts_data->tshut_cold); } } if (OMAP_BANDGAP_HAS(bg_ptr, MODE_CONFIG)) enable_continuous_mode(bg_ptr); /* Set .250 seconds time as default counter */ if (OMAP_BANDGAP_HAS(bg_ptr, COUNTER)) for (i = 0; i < bg_ptr->conf->sensor_count; i++) configure_temp_sensor_counter(bg_ptr, i, bg_ptr->clk_rate / 4); /* Every thing is good? Then expose the sensors */ for (i = 0; i < bg_ptr->conf->sensor_count; i++) { char *domain; if (bg_ptr->conf->sensors[i].register_cooling) bg_ptr->conf->sensors[i].register_cooling(bg_ptr, i); domain = bg_ptr->conf->sensors[i].domain; if (bg_ptr->conf->expose_sensor) bg_ptr->conf->expose_sensor(bg_ptr, i, domain); } /* * Enable the Interrupts once everything is set. Otherwise irq handler * might be called as soon as it is enabled where as rest of framework * is still getting initialised. 
*/ if (OMAP_BANDGAP_HAS(bg_ptr, TALERT)) { ret = omap_bandgap_talert_init(bg_ptr, pdev); if (ret) { dev_err(&pdev->dev, "failed to initialize Talert IRQ\n"); i = bg_ptr->conf->sensor_count; goto disable_clk; } } return 0; disable_clk: if (OMAP_BANDGAP_HAS(bg_ptr, CLK_CTRL)) clk_disable_unprepare(bg_ptr->fclock); put_clks: clk_put(bg_ptr->fclock); clk_put(bg_ptr->div_clk); free_irqs: if (OMAP_BANDGAP_HAS(bg_ptr, TSHUT)) { free_irq(gpio_to_irq(bg_ptr->tshut_gpio), NULL); gpio_free(bg_ptr->tshut_gpio); } return ret; }
/* Prepare+enable @clk only when it was actually obtained at probe time. */
static void rk_gmac_clk_on(struct clk *clk)
{
	if (!IS_ERR(clk))
		clk_prepare_enable(clk);
}

/*
 * gmac_clk_enable() - turn the Rockchip GMAC clock tree on or off
 * @bsp_priv: Rockchip glue-layer private data
 * @enable:   true to enable the clocks, false to disable them
 *
 * In addition to the common PHY/AXI/APB/TX clocks, the RMII-specific
 * reference clocks are handled when the PHY interface is RMII. The current
 * state is tracked in ->clk_enabled so repeated calls with the same @enable
 * value are no-ops. Always returns 0.
 */
static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
{
	bool is_rmii = bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII;

	if (enable && !bsp_priv->clk_enabled) {
		if (is_rmii) {
			rk_gmac_clk_on(bsp_priv->mac_clk_rx);
			rk_gmac_clk_on(bsp_priv->clk_mac_ref);
			rk_gmac_clk_on(bsp_priv->clk_mac_refout);
		}

		rk_gmac_clk_on(bsp_priv->clk_phy);
		rk_gmac_clk_on(bsp_priv->aclk_mac);
		rk_gmac_clk_on(bsp_priv->pclk_mac);
		rk_gmac_clk_on(bsp_priv->mac_clk_tx);
		/*
		 * clk_mac is deliberately left untouched, matching the
		 * commented-out handling in the original driver.
		 */

		/* Let the newly-enabled clock tree settle. */
		mdelay(5);
		bsp_priv->clk_enabled = true;
	} else if (!enable && bsp_priv->clk_enabled) {
		if (is_rmii) {
			clk_disable_unprepare(bsp_priv->mac_clk_rx);
			clk_disable_unprepare(bsp_priv->clk_mac_ref);
			clk_disable_unprepare(bsp_priv->clk_mac_refout);
		}

		clk_disable_unprepare(bsp_priv->clk_phy);
		clk_disable_unprepare(bsp_priv->aclk_mac);
		clk_disable_unprepare(bsp_priv->pclk_mac);
		clk_disable_unprepare(bsp_priv->mac_clk_tx);
		/* clk_mac is intentionally left alone here as well. */

		bsp_priv->clk_enabled = false;
	}

	return 0;
}
static int denali_dt_probe(struct platform_device *ofdev) { struct resource *denali_reg, *nand_data; struct denali_dt *dt; struct denali_nand_info *denali; int ret; const struct of_device_id *of_id; of_id = of_match_device(denali_nand_dt_ids, &ofdev->dev); if (of_id) { ofdev->id_entry = of_id->data; } else { pr_err("Failed to find the right device id.\n"); return -ENOMEM; } dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL); if (!dt) return -ENOMEM; denali = &dt->denali; denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg"); nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data"); if (!denali_reg || !nand_data) { dev_err(&ofdev->dev, "resources not completely defined\n"); return -EINVAL; } denali->platform = DT; denali->dev = &ofdev->dev; denali->irq = platform_get_irq(ofdev, 0); if (denali->irq < 0) { dev_err(&ofdev->dev, "no irq defined\n"); return -ENXIO; } denali->flash_reg = request_and_map(&ofdev->dev, denali_reg); if (!denali->flash_reg) return -ENOMEM; denali->flash_mem = request_and_map(&ofdev->dev, nand_data); if (!denali->flash_mem) return -ENOMEM; if (!of_property_read_u32(ofdev->dev.of_node, "dma-mask", (u32 *)&denali_dma_mask)) { denali->dev->dma_mask = &denali_dma_mask; } else { denali->dev->dma_mask = NULL; } dt->clk = clk_get(&ofdev->dev, NULL); if (IS_ERR(dt->clk)) { dev_err(&ofdev->dev, "no clk available\n"); return PTR_ERR(dt->clk); } clk_prepare_enable(dt->clk); ret = denali_init(denali); if (ret) goto out_disable_clk; platform_set_drvdata(ofdev, dt); return 0; out_disable_clk: clk_disable_unprepare(dt->clk); clk_put(dt->clk); return ret; }
/*
 * imx_driver_probe() - probe and bring up an i.MX UART under the hypervisor
 * @dev:   device being probed
 * @devid: matched device tree id entry (unused in the body)
 *
 * Maps the UART registers, reads the baudrate (defaulting to 115200) and the
 * input clock frequency from the device tree, enables the optional ipg/uart
 * clocks, reprograms the uart clock to the input rate, registers the RX
 * interrupt handler, performs low-level UART init and creates the serial
 * port object stored in dev->priv.
 *
 * Return: VMM_OK (0) on success, a negative VMM_E* code on failure.
 */
static int imx_driver_probe(struct vmm_device *dev,
			    const struct vmm_devtree_nodeid *devid)
{
	int rc = VMM_EFAIL;
	struct clk *clk_ipg = NULL;
	struct clk *clk_uart = NULL;
	struct imx_port *port = NULL;
	unsigned long old_rate = 0;

	port = vmm_zalloc(sizeof(struct imx_port));
	if (!port) {
		rc = VMM_ENOMEM;
		goto free_nothing;
	}

	rc = vmm_devtree_request_regmap(dev->of_node, &port->base, 0,
					"i.MX UART");
	if (rc) {
		goto free_port;
	}

	/* A missing "baudrate" property falls back to 115200. */
	if (vmm_devtree_read_u32(dev->of_node, "baudrate", &port->baudrate)) {
		port->baudrate = 115200;
	}

	rc = vmm_devtree_clock_frequency(dev->of_node, &port->input_clock);
	if (rc) {
		goto free_reg;
	}

	/* Setup clocks (both clocks are optional: a failed get is tolerated) */
	clk_ipg = of_clk_get(dev->of_node, 0);
	clk_uart = of_clk_get(dev->of_node, 1);
	if (!VMM_IS_ERR_OR_NULL(clk_ipg)) {
		rc = clk_prepare_enable(clk_ipg);
		if (rc) {
			goto free_reg;
		}
	}
	if (!VMM_IS_ERR_OR_NULL(clk_uart)) {
		rc = clk_prepare_enable(clk_uart);
		if (rc) {
			goto clk_disable_unprepare_ipg;
		}
		/* Remember the current rate so it can be restored on error. */
		old_rate = clk_get_rate(clk_uart);
		if (clk_set_rate(clk_uart, port->input_clock)) {
			vmm_printf("Could not set %s clock rate to %"PRId32" Hz, "
				   "actual rate: %lu Hz\n",
				   __clk_get_name(clk_uart),
				   port->input_clock, clk_get_rate(clk_uart));
			rc = VMM_ERANGE;
			goto clk_disable_unprepare_uart;
		}
	}

	/* Register interrupt handler (0 means the IRQ could not be mapped) */
	port->irq = vmm_devtree_irq_parse_map(dev->of_node, 0);
	if (!port->irq) {
		rc = VMM_ENODEV;
		goto clk_old_rate;
	}
	if ((rc = vmm_host_irq_register(port->irq, dev->name,
					imx_irq_handler, port))) {
		goto clk_old_rate;
	}

	/* Call low-level init function */
	imx_lowlevel_init(port->base, port->baudrate, port->input_clock);

	/* Create Serial Port */
	port->p = serial_create(dev, 256, imx_tx, port);
	if (VMM_IS_ERR_OR_NULL(port->p)) {
		rc = VMM_PTR_ERR(port->p);
		goto free_irq;
	}

	/* Save port pointer */
	dev->priv = port;

	/* Unmask Rx, Mask Tx, and Enable UART */
	port->mask = vmm_readl((void *)port->base + UCR1);
	port->mask |= (UCR1_RRDYEN | UCR1_UARTEN);
	port->mask &= ~(UCR1_TRDYEN);
	vmm_writel(port->mask, (void *)port->base + UCR1);

	return rc;

	/*
	 * Error unwind: each label deliberately falls through to the next
	 * cleanup step, undoing the setup in reverse order.
	 */
free_irq:
	vmm_host_irq_unregister(port->irq, port);
clk_old_rate:
	if (!VMM_IS_ERR_OR_NULL(clk_uart)) {
		if (old_rate) {
			clk_set_rate(clk_uart, old_rate);
		}
	}
clk_disable_unprepare_uart:
	if (!VMM_IS_ERR_OR_NULL(clk_uart)) {
		clk_disable_unprepare(clk_uart);
	}
clk_disable_unprepare_ipg:
	if (!VMM_IS_ERR_OR_NULL(clk_ipg)) {
		clk_disable_unprepare(clk_ipg);
	}
free_reg:
	vmm_devtree_regunmap_release(dev->of_node, port->base, 0);
free_port:
	vmm_free(port);
free_nothing:
	return rc;
}
static int broadcast_tdmb_fc8080_probe(struct spi_device *spi) { int rc; if(spi == NULL) { printk("broadcast_fc8080_probe spi is NULL, so spi can not be set\n"); return -1; } fc8080_ctrl_info.TdmbPowerOnState = FALSE; fc8080_ctrl_info.spi_ptr = spi; fc8080_ctrl_info.spi_ptr->mode = SPI_MODE_0; fc8080_ctrl_info.spi_ptr->bits_per_word = 8; fc8080_ctrl_info.spi_ptr->max_speed_hz = (16000*1000); fc8080_ctrl_info.pdev = to_platform_device(&spi->dev); #ifdef FEATURE_DMB_USE_BUS_SCALE fc8080_ctrl_info.bus_scale_pdata = msm_bus_cl_get_pdata(fc8080_ctrl_info.pdev); fc8080_ctrl_info.bus_scale_client_id = msm_bus_scale_register_client(fc8080_ctrl_info.bus_scale_pdata); #endif // Once I have a spi_device structure I can do a transfer anytime rc = spi_setup(spi); printk("broadcast_tdmb_fc8080_probe spi_setup=%d\n", rc); bbm_com_hostif_select(NULL, 1); #ifdef FEATURE_DMB_USE_XO fc8080_ctrl_info.clk = clk_get(&fc8080_ctrl_info.spi_ptr->dev, "tdmb_xo"); if (IS_ERR(fc8080_ctrl_info.clk)) { rc = PTR_ERR(fc8080_ctrl_info.clk); dev_err(&fc8080_ctrl_info.spi_ptr->dev, "could not get clock\n"); return rc; } /* We enable/disable the clock only to assure it works */ rc = clk_prepare_enable(fc8080_ctrl_info.clk); if (rc) { dev_err(&fc8080_ctrl_info.spi_ptr->dev, "could not enable clock\n"); return rc; } clk_disable_unprepare(fc8080_ctrl_info.clk); #endif #ifdef FEATURE_DMB_USE_WORKQUEUE INIT_WORK(&fc8080_ctrl_info.spi_work, broacast_tdmb_spi_work); fc8080_ctrl_info.spi_wq = create_singlethread_workqueue("tdmb_spi_wq"); if(fc8080_ctrl_info.spi_wq == NULL){ printk("Failed to setup tdmb spi workqueue \n"); return -ENOMEM; } #endif tdmb_configure_gpios(); #ifdef FEATURE_DMB_USE_PINCTRL tdmb_pinctrl_init(); #endif #ifdef FEATURE_DMB_USE_WORKQUEUE rc = request_irq(spi->irq, broadcast_tdmb_spi_isr, IRQF_DISABLED | IRQF_TRIGGER_FALLING, spi->dev.driver->name, &fc8080_ctrl_info); #else rc = request_threaded_irq(spi->irq, NULL, broadcast_tdmb_spi_event_handler, IRQF_ONESHOT | IRQF_DISABLED | 
IRQF_TRIGGER_FALLING, spi->dev.driver->name, &fc8080_ctrl_info); #endif printk("broadcast_tdmb_fc8080_probe request_irq=%d\n", rc); tdmb_fc8080_interrupt_lock(); mutex_init(&fc8080_ctrl_info.mutex); wake_lock_init(&fc8080_ctrl_info.wake_lock, WAKE_LOCK_SUSPEND, dev_name(&spi->dev)); spin_lock_init(&fc8080_ctrl_info.spin_lock); #ifdef FEATURE_DMB_USE_PM_QOS pm_qos_add_request(&fc8080_ctrl_info.pm_req_list, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); #endif printk("broadcast_fc8080_probe End\n"); return rc; }