/*
 * msm_ehci_suspend - put the MSM EHCI controller and its PHY into low
 * power mode (LPM).
 *
 * Sequence: mask the USB IRQ, force the PHY into PHCD (polling up to
 * PHY_SUSPEND_TIMEOUT_USEC), arm async wakeup + ULPI STP in USBCMD,
 * then drop clocks, the TCXO vote and (optionally) VDDCX, and finally
 * re-enable IRQs and release the wakelock.
 *
 * Returns 0 on success (or if already in LPM), -ETIMEDOUT if the PHY
 * refuses to enter low power mode (recovery is deferred to
 * phy_susp_fail_work).
 */
static int msm_ehci_suspend(struct msm_hcd *mhcd)
{
	struct msm_usb_host_platform_data *pdata;
	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
	unsigned long timeout;
	int ret;
	u32 portsc;

	pdata = mhcd->dev->platform_data;

	/* Idempotent: nothing to do if we are already in LPM. */
	if (atomic_read(&mhcd->in_lpm)) {
		dev_dbg(mhcd->dev, "%s called in lpm\n", __func__);
		return 0;
	}

	disable_irq(hcd->irq);

	/* Set the PHCD bit, only if it is not set by the controller.
	 * PHY may take some time or even fail to enter into low power
	 * mode (LPM). Hence poll for 500 msec and reset the PHY and link
	 * in failure case.
	 */
	portsc = readl_relaxed(USB_PORTSC);
	if (!(portsc & PORTSC_PHCD)) {
		writel_relaxed(portsc | PORTSC_PHCD, USB_PORTSC);

		timeout = jiffies + usecs_to_jiffies(PHY_SUSPEND_TIMEOUT_USEC);
		while (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD)) {
			if (time_after(jiffies, timeout)) {
				dev_err(mhcd->dev, "Unable to suspend PHY\n");
				/* Note: returns with the USB IRQ still
				 * disabled; the failure worker is expected
				 * to recover the PHY/link state. */
				schedule_work(&mhcd->phy_susp_fail_work);
				return -ETIMEDOUT;
			}
			udelay(1);
		}
	}

	/*
	 * PHY has capability to generate interrupt asynchronously in low
	 * power mode (LPM). This interrupt is level triggered. So USB IRQ
	 * line must be disabled till async interrupt enable bit is cleared
	 * in USBCMD register. Assert STP (ULPI interface STOP signal) to
	 * block data communication from PHY.
	 */
	writel_relaxed(readl_relaxed(USB_USBCMD) | ASYNC_INTR_CTRL |
				ULPI_STP_CTRL, USB_USBCMD);

	/*
	 * Ensure that hardware is put in low power mode before
	 * clocks are turned OFF and VDD is allowed to minimize.
	 */
	mb();

	clk_disable_unprepare(mhcd->iface_clk);
	clk_disable_unprepare(mhcd->core_clk);

	/* usb phy does not require TCXO clock, hence vote for TCXO disable */
	ret = msm_xo_mode_vote(mhcd->xo_handle, MSM_XO_MODE_OFF);
	if (ret)
		dev_err(mhcd->dev, "%s failed to devote for "
			"TCXO D0 buffer%d\n", __func__, ret);

	/* Only minimize VDDCX when the MPM wakeup interrupt is not used;
	 * presumably the PHY comparators need full VDDCX otherwise —
	 * TODO(review): confirm against the platform data docs. */
	if (!pdata->mpm_xo_wakeup_int)
		msm_ehci_config_vddcx(mhcd, 0);

	atomic_set(&mhcd->in_lpm, 1);
	enable_irq(hcd->irq);

	/* Optional PMIC GPIO D+ wakeup interrupt. */
	if (mhcd->pmic_gpio_dp_irq) {
		mhcd->pmic_gpio_dp_irq_enabled = 1;
		enable_irq_wake(mhcd->pmic_gpio_dp_irq);
		enable_irq(mhcd->pmic_gpio_dp_irq);
	}

	wake_unlock(&mhcd->wlock);

	dev_info(mhcd->dev, "EHCI USB in low power mode\n");

	return 0;
}
/*
 * global_timer_of_register - set up the ARM Cortex-A9 global timer from DT.
 *
 * Parses the PPI and register window from @np, enables the timer clock,
 * allocates per-CPU clock event devices, requests the per-CPU IRQ, and
 * registers a CPU notifier so secondary CPUs are configured as they come
 * online.  Finally brings up the clocksource and the boot CPU clockevent.
 * On failure, everything acquired so far is unwound and a WARN with the
 * errno is emitted.
 *
 * Fix: the notifier argument was garbled as ">_cpu_nb" (an "&gt" mangled
 * as an HTML entity) and did not compile; it must be &gt_cpu_nb.
 */
static void __init global_timer_of_register(struct device_node *np)
{
	struct clk *gt_clk;
	int err = 0;

	/*
	 * In A9 r2p0 the comparators for each processor with the global timer
	 * fire when the timer value is greater than or equal to. In previous
	 * revisions the comparators fired when the timer value was equal to.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9 &&
	    (read_cpuid_id() & 0xf0000f) < 0x200000) {
		pr_warn("global-timer: non support for this cpu version.\n");
		return;
	}

	gt_ppi = irq_of_parse_and_map(np, 0);
	if (!gt_ppi) {
		pr_warn("global-timer: unable to parse irq\n");
		return;
	}

	gt_base = of_iomap(np, 0);
	if (!gt_base) {
		pr_warn("global-timer: invalid base address\n");
		return;
	}

	gt_clk = of_clk_get(np, 0);
	if (!IS_ERR(gt_clk)) {
		err = clk_prepare_enable(gt_clk);
		if (err)
			goto out_unmap;
	} else {
		pr_warn("global-timer: clk not found\n");
		err = -EINVAL;
		goto out_unmap;
	}

	gt_clk_rate = clk_get_rate(gt_clk);
	gt_evt = alloc_percpu(struct clock_event_device);
	if (!gt_evt) {
		pr_warn("global-timer: can't allocate memory\n");
		err = -ENOMEM;
		goto out_clk;
	}

	err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
				 "gt", gt_evt);
	if (err) {
		pr_warn("global-timer: can't register interrupt %d (%d)\n",
			gt_ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&gt_cpu_nb);
	if (err) {
		pr_warn("global-timer: unable to register cpu notifier.\n");
		goto out_irq;
	}

	/* Immediately configure the timer on the boot CPU */
	gt_clocksource_init();
	gt_clockevents_init(this_cpu_ptr(gt_evt));

	return;

out_irq:
	free_percpu_irq(gt_ppi, gt_evt);
out_free:
	free_percpu(gt_evt);
out_clk:
	clk_disable_unprepare(gt_clk);
out_unmap:
	iounmap(gt_base);
	WARN(err, "ARM Global timer register failed (%d)\n", err);
}
/*
 * spdif_probe - platform probe for the Samsung S/PDIF transmitter.
 *
 * Acquires the DMA and MEM resources, the peripheral and source clocks,
 * maps the register window, then registers the ASoC component/DAI and
 * the DMA platform.  Returns 0 on success or a negative errno; resources
 * are released in reverse order on failure via the err0..err4 labels.
 */
static int spdif_probe(struct platform_device *pdev)
{
	struct s3c_audio_pdata *spdif_pdata;
	struct resource *mem_res, *dma_res;
	struct samsung_spdif_info *spdif;
	int ret;

	spdif_pdata = pdev->dev.platform_data;

	dev_dbg(&pdev->dev, "Entered %s\n", __func__);

	/* The playback DMA channel number comes in as IORESOURCE_DMA. */
	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dma_res) {
		dev_err(&pdev->dev, "Unable to get dma resource.\n");
		return -ENXIO;
	}

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		dev_err(&pdev->dev, "Unable to get register resource.\n");
		return -ENXIO;
	}

	/* Optional board hook to mux the S/PDIF pins. */
	if (spdif_pdata && spdif_pdata->cfg_gpio
			&& spdif_pdata->cfg_gpio(pdev)) {
		dev_err(&pdev->dev, "Unable to configure GPIO pins\n");
		return -EINVAL;
	}

	/* Driver state lives in a file-scope singleton (spdif_info). */
	spdif = &spdif_info;
	spdif->dev = &pdev->dev;

	spin_lock_init(&spdif->lock);

	spdif->pclk = devm_clk_get(&pdev->dev, "spdif");
	if (IS_ERR(spdif->pclk)) {
		dev_err(&pdev->dev, "failed to get peri-clock\n");
		ret = -ENOENT;
		goto err0;
	}
	/* NOTE(review): clk_prepare_enable() return value is not checked. */
	clk_prepare_enable(spdif->pclk);

	spdif->sclk = devm_clk_get(&pdev->dev, "sclk_spdif");
	if (IS_ERR(spdif->sclk)) {
		dev_err(&pdev->dev, "failed to get internal source clock\n");
		ret = -ENOENT;
		goto err1;
	}
	clk_prepare_enable(spdif->sclk);

	/* Request S/PDIF Register's memory region */
	if (!request_mem_region(mem_res->start,
			resource_size(mem_res), "samsung-spdif")) {
		dev_err(&pdev->dev, "Unable to request register region\n");
		ret = -EBUSY;
		goto err2;
	}

	/* Only the first 0x100 bytes of the window are mapped/used. */
	spdif->regs = ioremap(mem_res->start, 0x100);
	if (spdif->regs == NULL) {
		dev_err(&pdev->dev, "Cannot ioremap registers\n");
		ret = -ENXIO;
		goto err3;
	}

	dev_set_drvdata(&pdev->dev, spdif);

	ret = devm_snd_soc_register_component(&pdev->dev,
			&samsung_spdif_component, &samsung_spdif_dai, 1);
	if (ret != 0) {
		dev_err(&pdev->dev, "fail to register dai\n");
		goto err4;
	}

	/* 16-bit stereo playback; DMA target is the data output FIFO. */
	spdif_stereo_out.dma_size = 2;
	spdif_stereo_out.dma_addr = mem_res->start + DATA_OUTBUF;
	spdif_stereo_out.channel = dma_res->start;

	spdif->dma_playback = &spdif_stereo_out;

	ret = samsung_asoc_dma_platform_register(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DMA: %d\n", ret);
		goto err4;
	}

	return 0;

err4:
	iounmap(spdif->regs);
err3:
	release_mem_region(mem_res->start, resource_size(mem_res));
err2:
	clk_disable_unprepare(spdif->sclk);
err1:
	clk_disable_unprepare(spdif->pclk);
err0:
	return ret;
}
/*
 * ci_hdrc_imx_probe - probe the i.MX glue layer for the ChipIdea USB
 * controller.
 *
 * Gathers the clock, PHY and usbmisc configuration, then registers a
 * child ci_hdrc platform device that implements the actual controller.
 * Returns 0 on success or a negative errno; the clock is disabled and
 * the child device removed on failure.
 */
static int ci_hdrc_imx_probe(struct platform_device *pdev)
{
	struct ci_hdrc_imx_data *data;
	struct ci_hdrc_platform_data pdata = {
		.name		= "ci_hdrc_imx",
		.capoffset	= DEF_CAPOFFSET,
		.flags		= CI_HDRC_REQUIRE_TRANSCEIVER |
				  CI_HDRC_DISABLE_STREAMING,
	};
	int ret;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data) {
		dev_err(&pdev->dev, "Failed to allocate ci_hdrc-imx data!\n");
		return -ENOMEM;
	}

	/* usbmisc data is optional; an ERR_PTR (e.g. -EPROBE_DEFER) aborts. */
	data->usbmisc_data = usbmisc_get_init_data(&pdev->dev);
	if (IS_ERR(data->usbmisc_data))
		return PTR_ERR(data->usbmisc_data);

	data->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(data->clk)) {
		dev_err(&pdev->dev,
			"Failed to get clock, err=%ld\n", PTR_ERR(data->clk));
		return PTR_ERR(data->clk);
	}

	ret = clk_prepare_enable(data->clk);
	if (ret) {
		dev_err(&pdev->dev,
			"Failed to prepare or enable clock, err=%d\n", ret);
		return ret;
	}

	data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0);
	if (IS_ERR(data->phy)) {
		ret = PTR_ERR(data->phy);
		goto err_clk;
	}

	pdata.phy = data->phy;

	/* Shared USB code relies on a DMA mask being set. */
	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto err_clk;

	if (data->usbmisc_data) {
		ret = imx_usbmisc_init(data->usbmisc_data);
		if (ret) {
			dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n",
					ret);
			goto err_clk;
		}
	}

	/* Hand our resources to the core ci_hdrc child device. */
	data->ci_pdev = ci_hdrc_add_device(&pdev->dev,
				pdev->resource, pdev->num_resources,
				&pdata);
	if (IS_ERR(data->ci_pdev)) {
		ret = PTR_ERR(data->ci_pdev);
		dev_err(&pdev->dev,
			"Can't register ci_hdrc platform device, err=%d\n",
			ret);
		goto err_clk;
	}

	/* Post-init runs after the core device exists. */
	if (data->usbmisc_data) {
		ret = imx_usbmisc_init_post(data->usbmisc_data);
		if (ret) {
			dev_err(&pdev->dev,
				"usbmisc post failed, ret=%d\n", ret);
			goto disable_device;
		}
	}

	platform_set_drvdata(pdev, data);

	pm_runtime_no_callbacks(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;

disable_device:
	ci_hdrc_remove_device(data->ci_pdev);
err_clk:
	clk_disable_unprepare(data->clk);
	return ret;
}

/*
 * ci_hdrc_imx_remove - undo ci_hdrc_imx_probe: disable runtime PM,
 * remove the child device and gate the clock.
 */
static int ci_hdrc_imx_remove(struct platform_device *pdev)
{
	struct ci_hdrc_imx_data *data = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	ci_hdrc_remove_device(data->ci_pdev);
	clk_disable_unprepare(data->clk);

	return 0;
}

/* Device-tree match table. */
static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
	{ .compatible = "fsl,imx27-usb", },
	{ /* sentinel */ }
};
static int rockchip_spi_probe(struct platform_device *pdev) { struct resource *mem_res; struct rockchip_spi_driver_data *sdd; struct rockchip_spi_info *info = pdev->dev.platform_data; struct dw_spi *dws; int ret, irq; char clk_name[16]; if (!info && pdev->dev.of_node) { info = rockchip_spi_parse_dt(&pdev->dev); if (IS_ERR(info)) return PTR_ERR(info); } if (!info) { dev_err(&pdev->dev, "platform_data missing!\n"); return -ENODEV; } sdd = kzalloc(sizeof(struct rockchip_spi_driver_data), GFP_KERNEL); if (!sdd) { ret = -ENOMEM; goto err_kfree; } sdd->pdev = pdev; sdd->info = info; dws = &sdd->dws; atomic_set(&dws->debug_flag, 0);//debug flag /* Get basic io resource and map it */ irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq); return irq; } mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (mem_res == NULL) { dev_err(&pdev->dev, "Unable to get SPI MEM resource\n"); ret = -ENXIO; goto err_unmap; } dws->regs = ioremap(mem_res->start, (mem_res->end - mem_res->start) + 1); if (!dws->regs){ ret = -EBUSY; goto err_unmap; } dws->paddr = mem_res->start; dws->iolen = (mem_res->end - mem_res->start) + 1; printk(KERN_INFO "dws->regs: %p\n", dws->regs); //get bus num if (pdev->dev.of_node) { ret = of_alias_get_id(pdev->dev.of_node, "spi"); if (ret < 0) { dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); goto err_release_mem; } info->bus_num = ret; } else { info->bus_num = pdev->id; } /* Setup clocks */ sdd->clk_spi = devm_clk_get(&pdev->dev, "spi"); if (IS_ERR(sdd->clk_spi)) { dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n"); ret = PTR_ERR(sdd->clk_spi); goto err_clk; } if (clk_prepare_enable(sdd->clk_spi)) { dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n"); ret = -EBUSY; goto err_clk; } sprintf(clk_name, "pclk_spi%d", info->src_clk_nr); sdd->pclk_spi = devm_clk_get(&pdev->dev, clk_name); if (IS_ERR(sdd->pclk_spi)) { dev_err(&pdev->dev, "Unable to acquire clock '%s'\n", clk_name); ret = 
PTR_ERR(sdd->pclk_spi); goto err_pclk; } if (clk_prepare_enable(sdd->pclk_spi)) { dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name); ret = -EBUSY; goto err_pclk; } clk_set_rate(sdd->clk_spi, info->spi_freq); dws->max_freq = clk_get_rate(sdd->clk_spi); dws->parent_dev = &pdev->dev; dws->bus_num = info->bus_num; dws->num_cs = info->num_cs; dws->irq = irq; dws->clk_spi = sdd->clk_spi; dws->pclk_spi = sdd->pclk_spi; /* * handling for rockchip paltforms, like dma setup, * clock rate, FIFO depth. */ #ifdef CONFIG_SPI_ROCKCHIP_DMA ret = dw_spi_dma_init(dws); if (ret) printk("%s:fail to init dma\n",__func__); #endif ret = dw_spi_add_host(dws); if (ret) goto err_release_mem; platform_set_drvdata(pdev, sdd); printk("%s:num_cs=%d,irq=%d,freq=%d ok\n",__func__, info->num_cs, irq, dws->max_freq); return 0; err_release_mem: release_mem_region(mem_res->start, (mem_res->end - mem_res->start) + 1); err_pclk: clk_disable_unprepare(sdd->pclk_spi); err_clk: clk_disable_unprepare(sdd->clk_spi); err_unmap: iounmap(dws->regs); err_kfree: kfree(sdd); return ret; }
/*
 * spi_qup_probe - probe the Qualcomm QUP SPI controller.
 *
 * Maps registers, acquires core/iface clocks, allocates and configures
 * an SPI master (optionally DMA-capable), resets the QUP block, and
 * registers the master with runtime PM enabled.  Returns 0 on success
 * or a negative errno with all resources released.
 */
static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode, num_cs;
	int ret, irq, size;

	dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	/* This is optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	/* Both clocks must be running to touch the registers below. */
	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		return ret;
	}

	master = spi_alloc_master(dev, sizeof(struct spi_qup));
	if (!master) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	/* use num-cs unless not present or out of range */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		master->num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		master->num_chipselect = num_cs;

	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->max_speed_hz = max_freq;
	master->transfer_one = spi_qup_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;
	master->dma_alignment = dma_get_cache_alignment();
	master->max_dma_len = SPI_MAX_DMA_XFER;

	platform_set_drvdata(pdev, master);

	controller = spi_master_get_devdata(master);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->irq = irq;

	/* DMA is optional; only -EPROBE_DEFER is fatal here. */
	ret = spi_qup_init_dma(master, res->start);
	if (ret == -EPROBE_DEFER)
		goto error;
	else if (!ret)
		master->can_dma = spi_qup_can_dma;

	/* set v1 flag if device is version 1 */
	if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1"))
		controller->qup_v1 = 1;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

	/* Derive block/FIFO geometry from the IO modes register. */
	iomode = readl_relaxed(base + QUP_IO_M_MODES);

	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	/* Soft-reset the block and bring it to a known state. */
	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error_dma;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);

	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* if earlier version of the QUP, disable INPUT_OVERRUN */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
			base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error_dma;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error_dma:
	spi_qup_release_dma(master);
error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	spi_master_put(master);
	return ret;
}
/*
 * uniphier_uart_probe - probe the UniPhier on-chip UART as an 8250 port.
 *
 * Maps the register window, builds a uart_8250_port descriptor with the
 * UniPhier-specific register accessors (32-bit I/O, regshift 2, custom
 * divisor-latch hooks), and registers it with the 8250 core.
 * Returns 0 on success or a negative errno.
 */
static int uniphier_uart_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct uart_8250_port up;
	struct uniphier8250_priv *priv;
	struct resource *regs;
	void __iomem *membase;
	int irq;
	int ret;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(dev, "failed to get memory resource");
		return -EINVAL;
	}

	membase = devm_ioremap(dev, regs->start, resource_size(regs));
	if (!membase)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "failed to get IRQ number");
		return irq;
	}

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	memset(&up, 0, sizeof(up));

	/* Fills in clock/line settings; presumably also acquires priv->clk
	 * (released in the error path below) — defined elsewhere. */
	ret = uniphier_of_serial_setup(dev, &up.port, priv);
	if (ret < 0)
		return ret;

	spin_lock_init(&priv->atomic_write_lock);

	up.port.dev = dev;
	up.port.private_data = priv;
	up.port.mapbase = regs->start;
	up.port.mapsize = resource_size(regs);
	up.port.membase = membase;
	up.port.irq = irq;

	up.port.type = PORT_16550A;
	up.port.iotype = UPIO_MEM32;
	up.port.regshift = 2;
	up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE;
	up.capabilities = UART_CAP_FIFO;

	/* UniPhier register layout needs custom accessors. */
	up.port.serial_in = uniphier_serial_in;
	up.port.serial_out = uniphier_serial_out;
	up.dl_read = uniphier_serial_dl_read;
	up.dl_write = uniphier_serial_dl_write;

	ret = serial8250_register_8250_port(&up);
	if (ret < 0) {
		dev_err(dev, "failed to register 8250 port\n");
		clk_disable_unprepare(priv->clk);
		return ret;
	}

	platform_set_drvdata(pdev, priv);

	return 0;
}
/*
 * ehci_orion_drv_probe - probe the Marvell Orion SoC EHCI host.
 *
 * Maps registers, creates the HCD, enables the (optional) clock and PHY,
 * programs MBUS windows, applies the board's PHY setup, and finally adds
 * the HCD.  Returns 0 on success or a negative errno; the error labels
 * unwind PHY power/init, the clock and the HCD in reverse order.
 */
static int ehci_orion_drv_probe(struct platform_device *pdev)
{
	struct orion_ehci_data *pd = dev_get_platdata(&pdev->dev);
	const struct mbus_dram_target_info *dram;
	struct resource *res;
	struct usb_hcd *hcd;
	struct ehci_hcd *ehci;
	void __iomem *regs;
	int irq, err;
	enum orion_ehci_phy_ver phy_version;
	struct orion_ehci_hcd *priv;

	if (usb_disabled())
		return -ENODEV;

	pr_debug("Initializing Orion-SoC USB Host Controller\n");

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev,
			"Found HC with no IRQ. Check %s setup!\n",
			dev_name(&pdev->dev));
		err = -ENODEV;
		goto err;
	}

	/*
	 * Right now device-tree probed devices don't get dma_mask
	 * set. Since shared usb code relies on it, set it here for
	 * now. Once we have dma capability bindings this can go away.
	 */
	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		goto err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs)) {
		err = PTR_ERR(regs);
		goto err;
	}

	hcd = usb_create_hcd(&ehci_orion_hc_driver,
			&pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		err = -ENOMEM;
		goto err;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	hcd->regs = regs;

	ehci = hcd_to_ehci(hcd);
	/* Capability registers sit at a fixed 0x100 offset on this SoC. */
	ehci->caps = hcd->regs + 0x100;
	hcd->has_tt = 1;

	priv = hcd_to_orion_priv(hcd);
	/*
	 * Not all platforms can gate the clock, so it is not an error if
	 * the clock does not exists.
	 */
	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	priv->phy = devm_phy_optional_get(&pdev->dev, "usb");
	if (IS_ERR(priv->phy)) {
		err = PTR_ERR(priv->phy);
		goto err_phy_get;
	} else {
		err = phy_init(priv->phy);
		if (err)
			goto err_phy_init;

		err = phy_power_on(priv->phy);
		if (err)
			goto err_phy_power_on;
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		ehci_orion_conf_mbus_windows(hcd, dram);

	/*
	 * setup Orion USB controller.
	 */
	if (pdev->dev.of_node)
		phy_version = EHCI_PHY_NA;
	else
		phy_version = pd->phy_version;

	switch (phy_version) {
	case EHCI_PHY_NA:	/* dont change USB phy settings */
		break;
	case EHCI_PHY_ORION:
		orion_usb_phy_v1_setup(hcd);
		break;
	case EHCI_PHY_DD:
	case EHCI_PHY_KW:
	default:
		dev_warn(&pdev->dev, "USB phy version isn't supported.\n");
	}

	err = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (err)
		goto err_add_hcd;

	device_wakeup_enable(hcd->self.controller);
	return 0;

err_add_hcd:
	if (!IS_ERR(priv->phy))
		phy_power_off(priv->phy);
err_phy_power_on:
	if (!IS_ERR(priv->phy))
		phy_exit(priv->phy);
err_phy_init:
err_phy_get:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
	usb_put_hcd(hcd);
err:
	dev_err(&pdev->dev, "init %s fail, %d\n",
		dev_name(&pdev->dev), err);

	return err;
}
/*
 * sdhci_msm_probe - probe the Qualcomm MSM SDHCI controller.
 *
 * Initializes the pltfm host, sets up the bus/iface/core (and optional
 * cal/sleep/xo) clocks, maps the vendor "core" register space, applies
 * controller-version-specific quirks, wires up the power IRQ, enables
 * runtime PM and registers the SDHCI host.  Returns 0 on success or a
 * negative errno with everything unwound via the labels at the end.
 */
static int sdhci_msm_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_msm_host *msm_host;
	struct resource *core_memres;
	struct clk *clk;
	int ret;
	u16 host_version, core_minor;
	u32 core_version, config;
	u8 core_major;

	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->sdma_boundary = 0;
	pltfm_host = sdhci_priv(host);
	msm_host = sdhci_pltfm_priv(pltfm_host);
	msm_host->mmc = host->mmc;
	msm_host->pdev = pdev;

	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto pltfm_free;

	sdhci_get_of_property(pdev);

	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;

	/* Setup SDCC bus voter clock. */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for max. clk rate for max. performance */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			goto pltfm_free;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			goto pltfm_free;
	}

	/* Setup main peripheral bus clock */
	clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[1].clk = clk;

	/* Setup SDC MMC clock */
	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[0].clk = clk;

	/* Vote for maximum clock rate for maximum performance */
	ret = clk_set_rate(clk, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	/* cal and sleep clocks are optional; NULL clk is a no-op. */
	clk = devm_clk_get(&pdev->dev, "cal");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[2].clk = clk;

	clk = devm_clk_get(&pdev->dev, "sleep");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[3].clk = clk;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		goto bus_clk_disable;

	/*
	 * xo clock is needed for FLL feature of cm_dll.
	 * In case if xo clock is not mentioned in DT, warn and proceed.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	/* Vendor "core" register space is the second MEM resource. */
	core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);

	if (IS_ERR(msm_host->core_mem)) {
		dev_err(&pdev->dev, "Failed to remap registers\n");
		ret = PTR_ERR(msm_host->core_mem);
		goto clk_disable;
	}

	/* Reset the vendor spec register to power on reset state */
	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
		       host->ioaddr + CORE_VENDOR_SPEC);

	/* Set HC_MODE_EN bit in HC_MODE register */
	writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));

	config = readl_relaxed(msm_host->core_mem + CORE_HC_MODE);
	config |= FF_CLK_SW_RST_DIS;
	writel_relaxed(config, msm_host->core_mem + CORE_HC_MODE);

	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	core_version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		      CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;

	/*
	 * Support for some capabilities is not advertised by newer
	 * controller versions and must be explicitly enabled.
	 */
	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
		writel_relaxed(config, host->ioaddr +
			       CORE_VENDOR_SPEC_CAPABILITIES0);
	}

	/*
	 * Power on reset state may trigger power irq if previous status of
	 * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
	 * interrupt in GIC, any pending power irq interrupt should be
	 * acknowledged. Otherwise power irq interrupt handler would be
	 * fired prematurely.
	 */
	sdhci_msm_handle_pwr_irq(host, 0);

	/*
	 * Ensure that above writes are propogated before interrupt enablement
	 * in GIC.
	 */
	mb();

	/* Setup IRQ for handling power/voltage tasks with PMIC */
	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
	if (msm_host->pwr_irq < 0) {
		dev_err(&pdev->dev, "Get pwr_irq failed (%d)\n",
			msm_host->pwr_irq);
		ret = msm_host->pwr_irq;
		goto clk_disable;
	}

	sdhci_msm_init_pwr_irq_wait(msm_host);
	/* Enable pwr irq interrupts */
	writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK);

	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
					sdhci_msm_pwr_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
		goto clk_disable;
	}

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
	ret = sdhci_add_host(host);
	if (ret)
		goto pm_runtime_disable;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

pm_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
clk_disable:
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
bus_clk_disable:
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
	sdhci_pltfm_free(pdev);
	return ret;
}
/*
 * sunxi_de2_clk_probe - probe the Allwinner DE2 clock control unit.
 *
 * Maps the CCU registers, acquires the bus/mod clocks and the reset
 * control, enables the clocks and deasserts the reset so the registers
 * become accessible, then registers the CCU described by the per-SoC
 * match data.  Returns 0 on success or a negative errno with everything
 * unwound.
 */
static int sunxi_de2_clk_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct clk *bus_clk, *mod_clk;
	struct reset_control *rstc;
	void __iomem *reg;
	const struct sunxi_ccu_desc *ccu_desc;
	int ret;

	ccu_desc = of_device_get_match_data(&pdev->dev);
	if (!ccu_desc)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	/* -EPROBE_DEFER is expected during boot; stay quiet for it. */
	bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(bus_clk)) {
		ret = PTR_ERR(bus_clk);
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Couldn't get bus clk: %d\n", ret);
		return ret;
	}

	mod_clk = devm_clk_get(&pdev->dev, "mod");
	if (IS_ERR(mod_clk)) {
		ret = PTR_ERR(mod_clk);
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Couldn't get mod clk: %d\n", ret);
		return ret;
	}

	rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rstc)) {
		ret = PTR_ERR(rstc);
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev,
				"Couldn't get reset control: %d\n", ret);
		return ret;
	}

	/* The clocks need to be enabled for us to access the registers */
	ret = clk_prepare_enable(bus_clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable bus clk: %d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(mod_clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable mod clk: %d\n", ret);
		goto err_disable_bus_clk;
	}

	/* The reset line must be deasserted for the controls to work
	 * (the old comment said "asserted", contradicting the code). */
	ret = reset_control_deassert(rstc);
	if (ret) {
		dev_err(&pdev->dev,
			"Couldn't deassert reset control: %d\n", ret);
		goto err_disable_mod_clk;
	}

	ret = sunxi_ccu_probe(pdev->dev.of_node, reg, ccu_desc);
	if (ret)
		goto err_assert_reset;

	return 0;

err_assert_reset:
	reset_control_assert(rstc);
err_disable_mod_clk:
	clk_disable_unprepare(mod_clk);
err_disable_bus_clk:
	clk_disable_unprepare(bus_clk);
	return ret;
}
/* Search EMAC board, allocate space and register it */ static int emac_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct emac_board_info *db; struct net_device *ndev; int ret = 0; const char *mac_addr; ndev = alloc_etherdev(sizeof(struct emac_board_info)); if (!ndev) { dev_err(&pdev->dev, "could not allocate device.\n"); return -ENOMEM; } SET_NETDEV_DEV(ndev, &pdev->dev); db = netdev_priv(ndev); memset(db, 0, sizeof(*db)); db->dev = &pdev->dev; db->ndev = ndev; db->pdev = pdev; spin_lock_init(&db->lock); db->membase = of_iomap(np, 0); if (!db->membase) { dev_err(&pdev->dev, "failed to remap registers\n"); ret = -ENOMEM; goto out; } /* fill in parameters for net-dev structure */ ndev->base_addr = (unsigned long)db->membase; ndev->irq = irq_of_parse_and_map(np, 0); if (ndev->irq == -ENXIO) { netdev_err(ndev, "No irq resource\n"); ret = ndev->irq; goto out_iounmap; } db->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(db->clk)) { ret = PTR_ERR(db->clk); goto out_iounmap; } ret = clk_prepare_enable(db->clk); if (ret) { dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret); goto out_iounmap; } ret = sunxi_sram_claim(&pdev->dev); if (ret) { dev_err(&pdev->dev, "Error couldn't map SRAM to device\n"); goto out_clk_disable_unprepare; } db->phy_node = of_parse_phandle(np, "phy", 0); if (!db->phy_node) { dev_err(&pdev->dev, "no associated PHY\n"); ret = -ENODEV; goto out_release_sram; } /* Read MAC-address from DT */ mac_addr = of_get_mac_address(np); if (mac_addr) memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); /* Check if the MAC address is valid, if not get a random one */ if (!is_valid_ether_addr(ndev->dev_addr)) { eth_hw_addr_random(ndev); dev_warn(&pdev->dev, "using random MAC address %pM\n", ndev->dev_addr); } db->emacrx_completed_flag = 1; emac_powerup(ndev); emac_reset(db); ndev->netdev_ops = &emac_netdev_ops; ndev->watchdog_timeo = msecs_to_jiffies(watchdog); ndev->ethtool_ops = &emac_ethtool_ops; 
platform_set_drvdata(pdev, ndev); /* Carrier starts down, phylib will bring it up */ netif_carrier_off(ndev); ret = register_netdev(ndev); if (ret) { dev_err(&pdev->dev, "Registering netdev failed!\n"); ret = -ENODEV; goto out_release_sram; } dev_info(&pdev->dev, "%s: at %p, IRQ %d MAC: %pM\n", ndev->name, db->membase, ndev->irq, ndev->dev_addr); return 0; out_release_sram: sunxi_sram_release(&pdev->dev); out_clk_disable_unprepare: clk_disable_unprepare(db->clk); out_iounmap: iounmap(db->membase); out: dev_err(db->dev, "not found (%d).\n", ret); free_netdev(ndev); return ret; }
static int rockchip_spi_probe(struct platform_device *pdev) { int ret = 0; struct rockchip_spi *rs; struct spi_master *master; struct resource *mem; u32 rsd_nsecs; master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi)); if (!master) return -ENOMEM; platform_set_drvdata(pdev, master); rs = spi_master_get_devdata(master); /* Get basic io resource and map it */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); rs->regs = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(rs->regs)) { ret = PTR_ERR(rs->regs); goto err_ioremap_resource; } rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk"); if (IS_ERR(rs->apb_pclk)) { dev_err(&pdev->dev, "Failed to get apb_pclk\n"); ret = PTR_ERR(rs->apb_pclk); goto err_ioremap_resource; } rs->spiclk = devm_clk_get(&pdev->dev, "spiclk"); if (IS_ERR(rs->spiclk)) { dev_err(&pdev->dev, "Failed to get spi_pclk\n"); ret = PTR_ERR(rs->spiclk); goto err_ioremap_resource; } ret = clk_prepare_enable(rs->apb_pclk); if (ret) { dev_err(&pdev->dev, "Failed to enable apb_pclk\n"); goto err_ioremap_resource; } ret = clk_prepare_enable(rs->spiclk); if (ret) { dev_err(&pdev->dev, "Failed to enable spi_clk\n"); goto err_spiclk_enable; } spi_enable_chip(rs, 0); rs->type = SSI_MOTO_SPI; rs->master = master; rs->dev = &pdev->dev; rs->max_freq = clk_get_rate(rs->spiclk); if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns", &rsd_nsecs)) rs->rsd_nsecs = rsd_nsecs; rs->fifo_len = get_fifo_len(rs); if (!rs->fifo_len) { dev_err(&pdev->dev, "Failed to get fifo length\n"); ret = -EINVAL; goto err_get_fifo_len; } spin_lock_init(&rs->lock); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); master->auto_runtime_pm = true; master->bus_num = pdev->id; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP; master->num_chipselect = 2; master->dev.of_node = pdev->dev.of_node; master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8); master->set_cs = rockchip_spi_set_cs; master->prepare_message = 
rockchip_spi_prepare_message; master->unprepare_message = rockchip_spi_unprepare_message; master->transfer_one = rockchip_spi_transfer_one; master->handle_err = rockchip_spi_handle_err; rs->dma_tx.ch = dma_request_chan(rs->dev, "tx"); if (IS_ERR(rs->dma_tx.ch)) { /* Check tx to see if we need defer probing driver */ if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto err_get_fifo_len; } dev_warn(rs->dev, "Failed to request TX DMA channel\n"); rs->dma_tx.ch = NULL; } rs->dma_rx.ch = dma_request_chan(rs->dev, "rx"); if (IS_ERR(rs->dma_rx.ch)) { if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) { dma_release_channel(rs->dma_tx.ch); rs->dma_tx.ch = NULL; ret = -EPROBE_DEFER; goto err_get_fifo_len; } dev_warn(rs->dev, "Failed to request RX DMA channel\n"); rs->dma_rx.ch = NULL; } if (rs->dma_tx.ch && rs->dma_rx.ch) { dma_get_slave_caps(rs->dma_rx.ch, &(rs->dma_caps)); rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR); rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR); rs->dma_tx.direction = DMA_MEM_TO_DEV; rs->dma_rx.direction = DMA_DEV_TO_MEM; master->can_dma = rockchip_spi_can_dma; master->dma_tx = rs->dma_tx.ch; master->dma_rx = rs->dma_rx.ch; } ret = devm_spi_register_master(&pdev->dev, master); if (ret) { dev_err(&pdev->dev, "Failed to register master\n"); goto err_register_master; } return 0; err_register_master: pm_runtime_disable(&pdev->dev); if (rs->dma_tx.ch) dma_release_channel(rs->dma_tx.ch); if (rs->dma_rx.ch) dma_release_channel(rs->dma_rx.ch); err_get_fifo_len: clk_disable_unprepare(rs->spiclk); err_spiclk_enable: clk_disable_unprepare(rs->apb_pclk); err_ioremap_resource: spi_master_put(master); return ret; }
int pcm512x_probe(struct device *dev, struct regmap *regmap) { struct pcm512x_priv *pcm512x; int i, ret; pcm512x = devm_kzalloc(dev, sizeof(struct pcm512x_priv), GFP_KERNEL); if (!pcm512x) return -ENOMEM; dev_set_drvdata(dev, pcm512x); pcm512x->regmap = regmap; for (i = 0; i < ARRAY_SIZE(pcm512x->supplies); i++) pcm512x->supplies[i].supply = pcm512x_supply_names[i]; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(pcm512x->supplies), pcm512x->supplies); if (ret != 0) { dev_err(dev, "Failed to get supplies: %d\n", ret); return ret; } pcm512x->supply_nb[0].notifier_call = pcm512x_regulator_event_0; pcm512x->supply_nb[1].notifier_call = pcm512x_regulator_event_1; pcm512x->supply_nb[2].notifier_call = pcm512x_regulator_event_2; for (i = 0; i < ARRAY_SIZE(pcm512x->supplies); i++) { ret = regulator_register_notifier(pcm512x->supplies[i].consumer, &pcm512x->supply_nb[i]); if (ret != 0) { dev_err(dev, "Failed to register regulator notifier: %d\n", ret); } } ret = regulator_bulk_enable(ARRAY_SIZE(pcm512x->supplies), pcm512x->supplies); if (ret != 0) { dev_err(dev, "Failed to enable supplies: %d\n", ret); return ret; } /* Reset the device, verifying I/O in the process for I2C */ ret = regmap_write(regmap, PCM512x_RESET, PCM512x_RSTM | PCM512x_RSTR); if (ret != 0) { dev_err(dev, "Failed to reset device: %d\n", ret); goto err; } ret = regmap_write(regmap, PCM512x_RESET, 0); if (ret != 0) { dev_err(dev, "Failed to reset device: %d\n", ret); goto err; } pcm512x->sclk = devm_clk_get(dev, NULL); if (IS_ERR(pcm512x->sclk)) { if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) return -EPROBE_DEFER; dev_info(dev, "No SCLK, using BCLK: %ld\n", PTR_ERR(pcm512x->sclk)); /* Disable reporting of missing SCLK as an error */ regmap_update_bits(regmap, PCM512x_ERROR_DETECT, PCM512x_IDCH, PCM512x_IDCH); /* Switch PLL input to BCLK */ regmap_update_bits(regmap, PCM512x_PLL_REF, PCM512x_SREF, PCM512x_SREF); } else { ret = clk_prepare_enable(pcm512x->sclk); if (ret != 0) { dev_err(dev, "Failed to enable 
SCLK: %d\n", ret); return ret; } } /* Default to standby mode */ ret = regmap_update_bits(pcm512x->regmap, PCM512x_POWER, PCM512x_RQST, PCM512x_RQST); if (ret != 0) { dev_err(dev, "Failed to request standby: %d\n", ret); goto err_clk; } pm_runtime_set_active(dev); pm_runtime_enable(dev); pm_runtime_idle(dev); ret = snd_soc_register_codec(dev, &pcm512x_codec_driver, &pcm512x_dai, 1); if (ret != 0) { dev_err(dev, "Failed to register CODEC: %d\n", ret); goto err_pm; } return 0; err_pm: pm_runtime_disable(dev); err_clk: if (!IS_ERR(pcm512x->sclk)) clk_disable_unprepare(pcm512x->sclk); err: regulator_bulk_disable(ARRAY_SIZE(pcm512x->supplies), pcm512x->supplies); return ret; }
/*
 * Register the i.MX31 clock tree and its clkdev lookups.
 *
 * @fref: rate of the external high-frequency reference ("ckih") in Hz;
 *        the low-frequency reference ("ckil") is fixed at 32768 Hz.
 *
 * Always returns 0; individual clock registration failures are only
 * reported through pr_err() in the check loop below.
 */
int __init mx31_clocks_init(unsigned long fref)
{
	void __iomem *base = MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR);
	int i;

	/* Fixed-rate roots and the three PLLs fed from ckih */
	clk[ckih] = imx_clk_fixed("ckih", fref);
	clk[ckil] = imx_clk_fixed("ckil", 32768);
	clk[mpll] = imx_clk_pllv1("mpll", "ckih", base + MXC_CCM_MPCTL);
	clk[spll] = imx_clk_pllv1("spll", "ckih", base + MXC_CCM_SRPCTL);
	clk[upll] = imx_clk_pllv1("upll", "ckih", base + MXC_CCM_UPCTL);

	/* Core/bus muxes and dividers */
	clk[mcu_main] = imx_clk_mux("mcu_main", base + MXC_CCM_PMCR0, 31, 1,
			mcu_main_sel, ARRAY_SIZE(mcu_main_sel));
	clk[hsp] = imx_clk_divider("hsp", "mcu_main", base + MXC_CCM_PDR0, 11, 3);
	clk[ahb] = imx_clk_divider("ahb", "mcu_main", base + MXC_CCM_PDR0, 3, 3);
	clk[nfc] = imx_clk_divider("nfc", "ahb", base + MXC_CCM_PDR0, 8, 3);
	clk[ipg] = imx_clk_divider("ipg", "ahb", base + MXC_CCM_PDR0, 6, 2);
	clk[per_div] = imx_clk_divider("per_div", "upll", base + MXC_CCM_PDR0, 16, 5);
	clk[per] = imx_clk_mux("per", base + MXC_CCM_CCMR, 24, 1,
			per_sel, ARRAY_SIZE(per_sel));

	/* CSI / FIR / USB clock chains */
	clk[csi] = imx_clk_mux("csi_sel", base + MXC_CCM_CCMR, 25, 1,
			csi_sel, ARRAY_SIZE(csi_sel));
	clk[fir] = imx_clk_mux("fir_sel", base + MXC_CCM_CCMR, 11, 2,
			fir_sel, ARRAY_SIZE(fir_sel));
	clk[csi_div] = imx_clk_divider("csi_div", "csi_sel", base + MXC_CCM_PDR0, 23, 9);
	clk[usb_div_pre] = imx_clk_divider("usb_div_pre", "upll", base + MXC_CCM_PDR1, 30, 2);
	clk[usb_div_post] = imx_clk_divider("usb_div_post", "usb_div_pre", base + MXC_CCM_PDR1, 27, 3);
	clk[fir_div_pre] = imx_clk_divider("fir_div_pre", "fir_sel", base + MXC_CCM_PDR1, 24, 3);
	clk[fir_div_post] = imx_clk_divider("fir_div_post", "fir_div_pre", base + MXC_CCM_PDR1, 23, 6);

	/* Peripheral clock gates (CGR0/CGR1/CGR2) */
	clk[sdhc1_gate] = imx_clk_gate2("sdhc1_gate", "per", base + MXC_CCM_CGR0, 0);
	clk[sdhc2_gate] = imx_clk_gate2("sdhc2_gate", "per", base + MXC_CCM_CGR0, 2);
	clk[gpt_gate] = imx_clk_gate2("gpt_gate", "per", base + MXC_CCM_CGR0, 4);
	clk[epit1_gate] = imx_clk_gate2("epit1_gate", "per", base + MXC_CCM_CGR0, 6);
	clk[epit2_gate] = imx_clk_gate2("epit2_gate", "per", base + MXC_CCM_CGR0, 8);
	clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", base + MXC_CCM_CGR0, 10);
	clk[ata_gate] = imx_clk_gate2("ata_gate", "ipg", base + MXC_CCM_CGR0, 12);
	clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ahb", base + MXC_CCM_CGR0, 14);
	clk[cspi3_gate] = imx_clk_gate2("cspi3_gate", "ipg", base + MXC_CCM_CGR0, 16);
	clk[rng_gate] = imx_clk_gate2("rng_gate", "ipg", base + MXC_CCM_CGR0, 18);
	clk[uart1_gate] = imx_clk_gate2("uart1_gate", "per", base + MXC_CCM_CGR0, 20);
	clk[uart2_gate] = imx_clk_gate2("uart2_gate", "per", base + MXC_CCM_CGR0, 22);
	clk[ssi1_gate] = imx_clk_gate2("ssi1_gate", "spll", base + MXC_CCM_CGR0, 24);
	clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "per", base + MXC_CCM_CGR0, 26);
	clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "per", base + MXC_CCM_CGR0, 28);
	clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per", base + MXC_CCM_CGR0, 30);
	clk[hantro_gate] = imx_clk_gate2("hantro_gate", "per", base + MXC_CCM_CGR1, 0);
	clk[mstick1_gate] = imx_clk_gate2("mstick1_gate", "per", base + MXC_CCM_CGR1, 2);
	clk[mstick2_gate] = imx_clk_gate2("mstick2_gate", "per", base + MXC_CCM_CGR1, 4);
	clk[csi_gate] = imx_clk_gate2("csi_gate", "csi_div", base + MXC_CCM_CGR1, 6);
	clk[rtc_gate] = imx_clk_gate2("rtc_gate", "ipg", base + MXC_CCM_CGR1, 8);
	clk[wdog_gate] = imx_clk_gate2("wdog_gate", "ipg", base + MXC_CCM_CGR1, 10);
	clk[pwm_gate] = imx_clk_gate2("pwm_gate", "per", base + MXC_CCM_CGR1, 12);
	clk[sim_gate] = imx_clk_gate2("sim_gate", "per", base + MXC_CCM_CGR1, 14);
	clk[ect_gate] = imx_clk_gate2("ect_gate", "per", base + MXC_CCM_CGR1, 16);
	clk[usb_gate] = imx_clk_gate2("usb_gate", "ahb", base + MXC_CCM_CGR1, 18);
	clk[kpp_gate] = imx_clk_gate2("kpp_gate", "ipg", base + MXC_CCM_CGR1, 20);
	clk[ipu_gate] = imx_clk_gate2("ipu_gate", "hsp", base + MXC_CCM_CGR1, 22);
	clk[uart3_gate] = imx_clk_gate2("uart3_gate", "per", base + MXC_CCM_CGR1, 24);
	clk[uart4_gate] = imx_clk_gate2("uart4_gate", "per", base + MXC_CCM_CGR1, 26);
	clk[uart5_gate] = imx_clk_gate2("uart5_gate", "per", base + MXC_CCM_CGR1, 28);
	clk[owire_gate] = imx_clk_gate2("owire_gate", "per", base + MXC_CCM_CGR1, 30);
	clk[ssi2_gate] = imx_clk_gate2("ssi2_gate", "spll", base + MXC_CCM_CGR2, 0);
	clk[cspi1_gate] = imx_clk_gate2("cspi1_gate", "ipg", base + MXC_CCM_CGR2, 2);
	clk[cspi2_gate] = imx_clk_gate2("cspi2_gate", "ipg", base + MXC_CCM_CGR2, 4);
	clk[gacc_gate] = imx_clk_gate2("gacc_gate", "per", base + MXC_CCM_CGR2, 6);
	clk[emi_gate] = imx_clk_gate2("emi_gate", "ahb", base + MXC_CCM_CGR2, 8);
	clk[rtic_gate] = imx_clk_gate2("rtic_gate", "ahb", base + MXC_CCM_CGR2, 10);
	clk[firi_gate] = imx_clk_gate2("firi_gate", "upll", base+MXC_CCM_CGR2, 12);

	/* Report any clock that failed to register */
	for (i = 0; i < ARRAY_SIZE(clk); i++)
		if (IS_ERR(clk[i]))
			pr_err("imx31 clk %d: register failed with %ld\n",
				i, PTR_ERR(clk[i]));

	/* clkdev lookups for the platform devices */
	clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
	clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
	clk_register_clkdev(clk[cspi1_gate], NULL, "imx31-cspi.0");
	clk_register_clkdev(clk[cspi2_gate], NULL, "imx31-cspi.1");
	clk_register_clkdev(clk[cspi3_gate], NULL, "imx31-cspi.2");
	clk_register_clkdev(clk[pwm_gate], "pwm", NULL);
	clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
	clk_register_clkdev(clk[rtc_gate], "rtc", NULL);
	clk_register_clkdev(clk[epit1_gate], "epit", NULL);
	clk_register_clkdev(clk[epit2_gate], "epit", NULL);
	clk_register_clkdev(clk[nfc], NULL, "mxc_nand.0");
	clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
	clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
	clk_register_clkdev(clk[kpp_gate], NULL, "imx-keypad");
	clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.0");
	clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.0");
	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
	clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.1");
	clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.1");
	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
	clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.2");
	clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.2");
	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
	clk_register_clkdev(clk[usb_div_post], "per", "fsl-usb2-udc");
	clk_register_clkdev(clk[usb_gate], "ahb", "fsl-usb2-udc");
	clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
	clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
	/* i.mx31 has the i.mx21 type uart */
	clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
	clk_register_clkdev(clk[uart2_gate], "per", "imx21-uart.1");
	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
	clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
	clk_register_clkdev(clk[uart4_gate], "per", "imx21-uart.3");
	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.3");
	clk_register_clkdev(clk[uart5_gate], "per", "imx21-uart.4");
	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.4");
	clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
	clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
	clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
	clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1.0");
	clk_register_clkdev(clk[sdhc1_gate], NULL, "mxc-mmc.0");
	clk_register_clkdev(clk[sdhc2_gate], NULL, "mxc-mmc.1");
	clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
	clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
	clk_register_clkdev(clk[firi_gate], "firi", NULL);
	clk_register_clkdev(clk[ata_gate], NULL, "pata_imx");
	clk_register_clkdev(clk[rtic_gate], "rtic", NULL);
	clk_register_clkdev(clk[rng_gate], "rng", NULL);
	clk_register_clkdev(clk[sdma_gate], NULL, "imx31-sdma");
	clk_register_clkdev(clk[iim_gate], "iim", NULL);

	clk_set_parent(clk[csi], clk[upll]);
	clk_prepare_enable(clk[emi_gate]);
	/* Keep the IIM clocked across the silicon revision read */
	clk_prepare_enable(clk[iim_gate]);
	mx31_revision();
	clk_disable_unprepare(clk[iim_gate]);
	mxc_timer_init(MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR), MX31_INT_GPT);
	return 0;
}
static void pn547_change_clk(struct pn547_dev *pn547_dev, unsigned int clk_state) { static unsigned int nOldClkState = CLK_DISABLE; int ret = 0; if (nOldClkState == clk_state) { pr_err("%s: Desired clock state(%d) is same as previous state(%d)! Skip!\n", __func__, clk_state, nOldClkState); } else { switch (clk_state) { case CLK_DISABLE: if (nOldClkState == CLK_PIN) { if (pn547_dev->clk_pin != NULL) { clk_disable_unprepare(pn547_dev->clk_pin); nOldClkState = CLK_DISABLE; //pr_err("%s: PMIC Clock is Disabled\n", __func__); // for debug } else { pr_err("%s: PN547 could not get clock!\n", __func__); } } else if (nOldClkState == CLK_CONT) { if (pn547_dev->clk_cont != NULL) { clk_disable_unprepare(pn547_dev->clk_cont); nOldClkState = CLK_DISABLE; //pr_err("%s: PMIC Clock is Disabled\n", __func__); // for debug } else { pr_err("%s: PN547 could not get clock!\n", __func__); } } break; case CLK_PIN: if (pn547_dev->clk_pin != NULL) { ret = clk_prepare_enable(pn547_dev->clk_pin); if (ret) { pr_err("%s: PN547 could not enable clock (%d)\n", __func__, ret); clk_disable_unprepare(pn547_dev->clk_pin); nOldClkState = CLK_DISABLE; } nOldClkState = CLK_PIN; //pr_err("%s: PMIC Clock source is CXO_D1_PIN!\n", __func__); // for debug } else { pr_err("%s: PN547 could not get pin clock!\n", __func__); } break; case CLK_CONT: if (pn547_dev->clk_cont != NULL) { ret = clk_prepare_enable(pn547_dev->clk_cont); if (ret) { pr_err("%s: PN547 could not enable clock (%d)\n", __func__, ret); clk_disable_unprepare(pn547_dev->clk_cont); nOldClkState = CLK_DISABLE; } nOldClkState = CLK_CONT; //pr_err("%s: PMIC Clock source is CXO_D1!\n", __func__); // for debug } else { pr_err("%s: PN547 could not get cont. clock!\n", __func__); } break; default: pr_err("%s: Undefined Clock Setting!\n", __func__); break; } } }
static int pl353_smc_probe(struct platform_device *pdev) { struct pl353_smc_data *pl353_smc; struct device_node *child; struct resource *res; int err; struct device_node *of_node = pdev->dev.of_node; const struct of_device_id *matches = NULL; pl353_smc = devm_kzalloc(&pdev->dev, sizeof(*pl353_smc), GFP_KERNEL); if (!pl353_smc) return -ENOMEM; /* Get the NAND controller virtual address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pl353_smc_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pl353_smc_base)) return PTR_ERR(pl353_smc_base); pl353_smc->aclk = devm_clk_get(&pdev->dev, "aclk"); if (IS_ERR(pl353_smc->aclk)) { dev_err(&pdev->dev, "aclk clock not found.\n"); return PTR_ERR(pl353_smc->aclk); } pl353_smc->memclk = devm_clk_get(&pdev->dev, "memclk"); if (IS_ERR(pl353_smc->memclk)) { dev_err(&pdev->dev, "memclk clock not found.\n"); return PTR_ERR(pl353_smc->memclk); } err = clk_prepare_enable(pl353_smc->aclk); if (err) { dev_err(&pdev->dev, "Unable to enable AXI clock.\n"); return err; } err = clk_prepare_enable(pl353_smc->memclk); if (err) { dev_err(&pdev->dev, "Unable to enable memory clock.\n"); goto out_clk_dis_aper; } platform_set_drvdata(pdev, pl353_smc); /* clear interrupts */ writel(PL353_SMC_CFG_CLR_DEFAULT_MASK, pl353_smc_base + PL353_SMC_CFG_CLR_OFFS); /* Find compatible children. 
Only a single child is supported */ for_each_available_child_of_node(of_node, child) { if (of_match_node(matches_nand, child)) { pl353_smc_init_nand_interface(pdev, child); if (!matches) { matches = matches_nand; } else { dev_err(&pdev->dev, "incompatible configuration\n"); goto out_clk_disable; } } if (of_match_node(matches_nor, child)) { static int counts; if (!matches) { matches = matches_nor; } else { if (matches != matches_nor || counts > 1) { dev_err(&pdev->dev, "incompatible configuration\n"); goto out_clk_disable; } } counts++; } } if (matches) of_platform_populate(of_node, matches, NULL, &pdev->dev); return 0; out_clk_disable: clk_disable_unprepare(pl353_smc->memclk); out_clk_dis_aper: clk_disable_unprepare(pl353_smc->aclk); return err; }
int twl6040_power(struct twl6040 *twl6040, int on) { int ret = 0; mutex_lock(&twl6040->mutex); if (on) { /* already powered-up */ if (twl6040->power_count++) goto out; clk_prepare_enable(twl6040->clk32k); /* Allow writes to the chip */ regcache_cache_only(twl6040->regmap, false); if (gpio_is_valid(twl6040->audpwron)) { /* use automatic power-up sequence */ ret = twl6040_power_up_automatic(twl6040); if (ret) { twl6040->power_count = 0; goto out; } } else { /* use manual power-up sequence */ ret = twl6040_power_up_manual(twl6040); if (ret) { twl6040->power_count = 0; goto out; } } /* Sync with the HW */ regcache_sync(twl6040->regmap); /* Default PLL configuration after power up */ twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL; twl6040->sysclk = 19200000; twl6040->mclk = 32768; } else { /* already powered-down */ if (!twl6040->power_count) { dev_err(twl6040->dev, "device is already powered-off\n"); ret = -EPERM; goto out; } if (--twl6040->power_count) goto out; if (gpio_is_valid(twl6040->audpwron)) { /* use AUDPWRON line */ gpio_set_value(twl6040->audpwron, 0); /* power-down sequence latency */ usleep_range(500, 700); } else { /* use manual power-down sequence */ twl6040_power_down_manual(twl6040); } /* Set regmap to cache only and mark it as dirty */ regcache_cache_only(twl6040->regmap, true); regcache_mark_dirty(twl6040->regmap); twl6040->sysclk = 0; twl6040->mclk = 0; clk_disable_unprepare(twl6040->clk32k); } out: mutex_unlock(&twl6040->mutex); return ret; }
/*
 * Bring up the internal-codec RX (playback) path: vote the SMPS mode,
 * start the MI2S and LPA clocks, configure the LPA block, ADIE codec
 * path and AFE, then enable the power amplifier.
 *
 * Returns 0 on success; on any failure the partially enabled hardware
 * is unwound via the error labels and -ENODEV is returned.
 */
static int snddev_icodec_open_rx(struct snddev_icodec_state *icodec)
{
	int trc, err;
	int smps_mode = PMAPP_SMPS_MODE_VOTE_PWM;
	struct msm_afe_config afe_config;
	struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
	struct lpa_codec_config lpa_config;

	/* Block deep CPU idle while the path is being brought up */
	pm_qos_update_request(&drv->rx_pm_qos_req,
		msm_cpuidle_get_deep_idle_latency());

	if ((icodec->data->acdb_id == ACDB_ID_HEADSET_SPKR_MONO) ||
		(icodec->data->acdb_id == ACDB_ID_HEADSET_SPKR_STEREO)) {
		/* Vote PMAPP_SMPS_MODE_VOTE_PFM for headset */
		smps_mode = PMAPP_SMPS_MODE_VOTE_PFM;
		MM_DBG("snddev_icodec_open_rx: PMAPP_SMPS_MODE_VOTE_PFM \n");
	} else
		MM_DBG("snddev_icodec_open_rx: PMAPP_SMPS_MODE_VOTE_PWM \n");

	/* Vote for SMPS mode*/
	err = pmapp_smps_mode_vote(SMPS_AUDIO_PLAYBACK_ID,
			PMAPP_VREG_S4, smps_mode);
	if (err != 0)
		/* Vote failure is logged but not fatal for the path */
		MM_ERR("pmapp_smps_mode_vote error %d\n", err);

	/* enable MI2S RX master block */
	/* enable MI2S RX bit clock */
	trc = clk_set_rate(drv->rx_mclk,
		SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate));
	if (IS_ERR_VALUE(trc))
		goto error_invalid_freq;
	clk_prepare_enable(drv->rx_mclk);
	clk_prepare_enable(drv->rx_sclk);
	/* clk_set_rate(drv->lpa_codec_clk, 1); */ /* Remove if use pcom */
	clk_prepare_enable(drv->lpa_p_clk);
	clk_prepare_enable(drv->lpa_codec_clk);
	clk_prepare_enable(drv->lpa_core_clk);

	/* Enable LPA sub system */
	drv->lpa = lpa_get();
	if (!drv->lpa)
		goto error_lpa;
	lpa_config.sample_rate = icodec->sample_rate;
	lpa_config.sample_width = 16;
	lpa_config.output_interface = LPA_OUTPUT_INTF_WB_CODEC;
	lpa_config.num_channels = icodec->data->channel_mode;
	lpa_cmd_codec_config(drv->lpa, &lpa_config);

	/* Set audio interconnect reg to LPA */
	audio_interct_codec(AUDIO_INTERCT_LPA);

	/* Set MI2S */
	mi2s_set_codec_output_path((icodec->data->channel_mode == 2 ?
		MI2S_CHAN_STEREO : MI2S_CHAN_MONO_PACKED), WT_16_BIT);

	/* Optional platform hook to raise codec supply voltage */
	if (icodec->data->voltage_on)
		icodec->data->voltage_on();

	/* Configure ADIE */
	trc = adie_codec_open(icodec->data->profile, &icodec->adie_path);
	if (IS_ERR_VALUE(trc))
		goto error_adie;
	/* OSR default to 256, can be changed for power optimization
	 * If OSR is to be changed, need clock API for setting the divider
	 */
	adie_codec_setpath(icodec->adie_path, icodec->sample_rate, 256);

	/* Start AFE */
	afe_config.sample_rate = icodec->sample_rate / 1000;
	afe_config.channel_mode = icodec->data->channel_mode;
	afe_config.volume = AFE_VOLUME_UNITY;
	trc = afe_enable(AFE_HW_PATH_CODEC_RX, &afe_config);
	if (IS_ERR_VALUE(trc))
		goto error_afe;

	lpa_cmd_enable_codec(drv->lpa, 1);

	/* Enable ADIE */
	adie_codec_proceed_stage(icodec->adie_path, ADIE_CODEC_DIGITAL_READY);
	adie_codec_proceed_stage(icodec->adie_path,
		ADIE_CODEC_DIGITAL_ANALOG_READY);

	/* Enable power amplifier */
	if (icodec->data->pamp_on)
		icodec->data->pamp_on();

	icodec->enabled = 1;

	/* Allow deep idle again now that the path is up */
	pm_qos_update_request(&drv->rx_pm_qos_req, PM_QOS_DEFAULT_VALUE);
	return 0;

	/* Unwind in reverse order of bring-up */
error_afe:
	adie_codec_close(icodec->adie_path);
	icodec->adie_path = NULL;
error_adie:
	lpa_put(drv->lpa);
error_lpa:
	clk_disable_unprepare(drv->lpa_p_clk);
	clk_disable_unprepare(drv->lpa_codec_clk);
	clk_disable_unprepare(drv->lpa_core_clk);
	clk_disable_unprepare(drv->rx_sclk);
	clk_disable_unprepare(drv->rx_mclk);
error_invalid_freq:
	MM_ERR("encounter error\n");
	pm_qos_update_request(&drv->rx_pm_qos_req, PM_QOS_DEFAULT_VALUE);
	return -ENODEV;
}
/*
 * Set the DDR to either 528MHz or 400MHz for iMX6qd
 * or 400MHz for iMX6dl.
 *
 * @high_bus_freq: nonzero to select the full "high" setpoint; zero
 * selects the medium DDR rate on the SoCs handled by the else-branch.
 *
 * Operates entirely on driver-global mode flags and clock handles;
 * returns 0 in all cases (early-outs when scaling is inactive,
 * suspended, or the requested mode is already in effect).
 */
static int set_high_bus_freq(int high_bus_freq)
{
	struct clk *periph_clk_parent;

	if (bus_freq_scaling_initialized && bus_freq_scaling_is_active)
		cancel_delayed_work_sync(&low_bus_freq_handler);

	if (busfreq_suspended)
		return 0;

	/* MX6Q can feed periph from pll2_bus; others use pll2_400 */
	if (cpu_is_imx6q())
		periph_clk_parent = pll2_bus;
	else
		periph_clk_parent = pll2_400;

	if (!bus_freq_scaling_initialized || !bus_freq_scaling_is_active)
		return 0;

	if (high_bus_freq_mode)
		return 0;

	/* medium bus freq is only supported for MX6DQ */
	if (med_bus_freq_mode && !high_bus_freq)
		return 0;

	if (low_bus_freq_mode || ultra_low_bus_freq_mode)
		busfreq_notify(LOW_BUSFREQ_EXIT);

	/* pll3 is held enabled across the reparenting below */
	if (cpu_is_imx6())
		clk_prepare_enable(pll3);

	if (cpu_is_imx7d())
		exit_lpm_imx7d();
	else if (cpu_is_imx6sl())
		exit_lpm_imx6sl();
	else if (cpu_is_imx6sx() || cpu_is_imx6ul())
		exit_lpm_imx6_up();
	else {
		if (high_bus_freq) {
			clk_prepare_enable(pll2_400);
			update_ddr_freq_imx_smp(ddr_normal_rate);
			/* Make sure periph clk's parent also got updated */
			imx_clk_set_parent(periph_clk2_sel, pll3);
			imx_clk_set_parent(periph_pre_clk, periph_clk_parent);
			imx_clk_set_parent(periph_clk, periph_pre_clk);
			if (cpu_is_imx6dl()) {
				/* Set axi to pll3_pfd1_540m */
				imx_clk_set_parent(axi_alt_sel_clk,
					pll3_pfd1_540m);
				imx_clk_set_parent(axi_sel_clk,
					axi_alt_sel_clk);
			}
			clk_disable_unprepare(pll2_400);
		} else {
			update_ddr_freq_imx_smp(ddr_med_rate);
			/* Make sure periph clk's parent also got updated */
			imx_clk_set_parent(periph_clk2_sel, pll3);
			imx_clk_set_parent(periph_pre_clk, pll2_400);
			imx_clk_set_parent(periph_clk, periph_pre_clk);
		}
		/* Drop the extra pll2_400 reference held by audio mode */
		if (audio_bus_freq_mode)
			clk_disable_unprepare(pll2_400);
	}

	high_bus_freq_mode = 1;
	med_bus_freq_mode = 0;
	low_bus_freq_mode = 0;
	audio_bus_freq_mode = 0;
	cur_bus_freq_mode = BUS_FREQ_HIGH;

	if (cpu_is_imx6())
		clk_disable_unprepare(pll3);

	/*
	 * NOTE(review): high_bus_freq_mode was just set to 1 and
	 * med_bus_freq_mode to 0 above, so the first dev_dbg() always
	 * fires and the second is dead code - verify this is intended.
	 */
	if (high_bus_freq_mode)
		dev_dbg(busfreq_dev, "Bus freq set to high mode. Count:\ high %d, med %d, audio %d\n", high_bus_count, med_bus_count, audio_bus_count);
	if (med_bus_freq_mode)
		dev_dbg(busfreq_dev, "Bus freq set to med mode. Count:\ high %d, med %d, audio %d\n", high_bus_count, med_bus_count, audio_bus_count);

	return 0;
}
/*
 * Bring up the internal-codec TX (capture) path: vote the SMPS mode,
 * enable the mic bias (HSED), start the MI2S TX clocks, configure the
 * ADIE codec path and start the AFE.
 *
 * Returns 0 on success; on failure the enabled hardware is unwound
 * via the error labels and -ENODEV is returned.
 */
static int snddev_icodec_open_tx(struct snddev_icodec_state *icodec)
{
	int trc;
	int i, err;
	struct msm_afe_config afe_config;
	struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;; /* NOTE(review): stray double semicolon */

	/* Block deep CPU idle while the path is being brought up */
	pm_qos_update_request(&drv->tx_pm_qos_req,
		msm_cpuidle_get_deep_idle_latency());

	/* Vote for PWM mode*/
	err = pmapp_smps_mode_vote(SMPS_AUDIO_RECORD_ID,
			PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_PWM);
	if (err != 0)
		/* Vote failure is logged but not fatal for the path */
		MM_ERR("pmapp_smps_mode_vote error %d\n", err);

	/* Reuse pamp_on for TX platform-specific setup */
	if (icodec->data->pamp_on)
		icodec->data->pamp_on();

	/* Enable mic bias on every configured HSED controller */
	for (i = 0; i < icodec->data->pmctl_id_sz; i++) {
		pmic_hsed_enable(icodec->data->pmctl_id[i],
#if defined(CONFIG_MACH_KYLE) || defined(CONFIG_MACH_ROY)
			PM_HSED_ENABLE_ALWAYS);
#else
			PM_HSED_ENABLE_PWM_TCXO);
#endif
	}

	/* enable MI2S TX master block */
	/* enable MI2S TX bit clock */
	trc = clk_set_rate(drv->tx_mclk,
		SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate));
	if (IS_ERR_VALUE(trc))
		goto error_invalid_freq;
	clk_prepare_enable(drv->tx_mclk);
	clk_prepare_enable(drv->tx_sclk);

	/* Set MI2S */
	mi2s_set_codec_input_path((icodec->data->channel_mode ==
		REAL_STEREO_CHANNEL_MODE ? MI2S_CHAN_STEREO :
		(icodec->data->channel_mode == 2 ?
		MI2S_CHAN_STEREO : MI2S_CHAN_MONO_RAW)), WT_16_BIT);

	/* Configure ADIE */
	trc = adie_codec_open(icodec->data->profile, &icodec->adie_path);
	if (IS_ERR_VALUE(trc))
		goto error_adie;
	/* Enable ADIE */
	adie_codec_setpath(icodec->adie_path, icodec->sample_rate, 256);
	adie_codec_proceed_stage(icodec->adie_path, ADIE_CODEC_DIGITAL_READY);
	adie_codec_proceed_stage(icodec->adie_path,
		ADIE_CODEC_DIGITAL_ANALOG_READY);

	/* Start AFE */
	afe_config.sample_rate = icodec->sample_rate / 1000;
	afe_config.channel_mode = icodec->data->channel_mode;
	afe_config.volume = AFE_VOLUME_UNITY;
	trc = afe_enable(AFE_HW_PATH_CODEC_TX, &afe_config);
	if (IS_ERR_VALUE(trc))
		goto error_afe;

	icodec->enabled = 1;

	/* Allow deep idle again now that the path is up */
	pm_qos_update_request(&drv->tx_pm_qos_req, PM_QOS_DEFAULT_VALUE);
	return 0;

	/* Unwind in reverse order of bring-up */
error_afe:
	adie_codec_close(icodec->adie_path);
	icodec->adie_path = NULL;
error_adie:
	clk_disable_unprepare(drv->tx_sclk);
	clk_disable_unprepare(drv->tx_mclk);
error_invalid_freq:
	/* Disable mic bias */
	for (i = 0; i < icodec->data->pmctl_id_sz; i++) {
		pmic_hsed_enable(icodec->data->pmctl_id[i],
			PM_HSED_ENABLE_OFF);
	}

	if (icodec->data->pamp_off)
		icodec->data->pamp_off();

	MM_ERR("encounter error\n");
	pm_qos_update_request(&drv->tx_pm_qos_req, PM_QOS_DEFAULT_VALUE);
	return -ENODEV;
}
static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct msm_rng_device *msm_rng_dev; struct platform_device *pdev; void __iomem *base; size_t maxsize; size_t currsize = 0; unsigned long val; unsigned long *retdata = data; int ret; msm_rng_dev = (struct msm_rng_device *)rng->priv; pdev = msm_rng_dev->pdev; base = msm_rng_dev->base; /* calculate max size bytes to transfer back to caller */ maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max); /* no room for word data */ if (maxsize < 4) return 0; /* enable PRNG clock */ ret = clk_prepare_enable(msm_rng_dev->prng_clk); if (ret) { dev_err(&pdev->dev, "failed to enable clock in callback\n"); return 0; } if (msm_rng_dev->qrng_perf_client) { ret = msm_bus_scale_client_update_request( msm_rng_dev->qrng_perf_client, 1); if (ret) pr_err("bus_scale_client_update_req failed!\n"); } /* read random data from h/w */ do { /* check status bit if data is available */ if (!(readl_relaxed(base + PRNG_STATUS_OFFSET) & 0x00000001)) break; /* no data to read so just bail */ /* read FIFO */ val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET); if (!val) break; /* no data to read so just bail */ /* write data back to callers pointer */ *(retdata++) = val; currsize += 4; /* make sure we stay on 32bit boundary */ if ((maxsize - currsize) < 4) break; } while (currsize < maxsize); if (msm_rng_dev->qrng_perf_client) ret = msm_bus_scale_client_update_request( msm_rng_dev->qrng_perf_client, 0); /* vote to turn off clock */ clk_disable_unprepare(msm_rng_dev->prng_clk); return currsize; }
static void debugfs_adie_loopback(u32 loop) { struct snddev_icodec_drv_state *drv = &snddev_icodec_drv; if (loop) { /* enable MI2S RX master block */ /* enable MI2S RX bit clock */ clk_set_rate(drv->rx_mclk, SNDDEV_ICODEC_CLK_RATE(8000)); clk_prepare_enable(drv->rx_mclk); clk_prepare_enable(drv->rx_sclk); MM_INFO("configure ADIE RX path\n"); /* Configure ADIE */ adie_codec_open(&debug_rx_profile, &debugfs_rx_adie); adie_codec_setpath(debugfs_rx_adie, 8000, 256); adie_codec_proceed_stage(debugfs_rx_adie, ADIE_CODEC_DIGITAL_ANALOG_READY); MM_INFO("Enable Handset Mic bias\n"); #if defined(CONFIG_MACH_KYLE) || defined(CONFIG_MACH_ROY) pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_ALWAYS); #else pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_PWM_TCXO); #endif /* enable MI2S TX master block */ /* enable MI2S TX bit clock */ clk_set_rate(drv->tx_mclk, SNDDEV_ICODEC_CLK_RATE(8000)); clk_prepare_enable(drv->tx_mclk); clk_prepare_enable(drv->tx_sclk); MM_INFO("configure ADIE TX path\n"); /* Configure ADIE */ adie_codec_open(&debug_tx_lb_profile, &debugfs_tx_adie); adie_codec_setpath(debugfs_tx_adie, 8000, 256); adie_codec_proceed_stage(debugfs_tx_adie, ADIE_CODEC_DIGITAL_ANALOG_READY); } else { /* Disable ADIE */ adie_codec_proceed_stage(debugfs_rx_adie, ADIE_CODEC_DIGITAL_OFF); adie_codec_close(debugfs_rx_adie); adie_codec_proceed_stage(debugfs_tx_adie, ADIE_CODEC_DIGITAL_OFF); adie_codec_close(debugfs_tx_adie); pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_OFF); /* Disable MI2S RX master block */ /* Disable MI2S RX bit clock */ clk_disable_unprepare(drv->rx_sclk); clk_disable_unprepare(drv->rx_mclk); /* Disable MI2S TX master block */ /* Disable MI2S TX bit clock */ clk_disable_unprepare(drv->tx_sclk); clk_disable_unprepare(drv->tx_mclk); } }
/*
 * Set SAIF clock and MCLK
 *
 * @saif: the SAIF instance requesting the change (may be a slave).
 * @mclk: desired MCLK rate in Hz (only used when MCLK is in use).
 * @rate: audio frame rate (fs) in Hz.
 *
 * All clocking is programmed on the master SAIF instance.  Returns 0
 * on success, -EINVAL when no master exists, when the master is busy
 * at a different rate, or when the mclk/rate ratio is unsupported;
 * may also propagate a clk_set_rate() error.
 */
static int mxs_saif_set_clk(struct mxs_saif *saif,
				  unsigned int mclk,
				  unsigned int rate)
{
	u32 scr;
	int ret;
	struct mxs_saif *master_saif;

	dev_dbg(saif->dev, "mclk %d rate %d\n", mclk, rate);

	/* Set master saif to generate proper clock */
	master_saif = mxs_saif_get_master(saif);
	if (!master_saif)
		return -EINVAL;

	dev_dbg(saif->dev, "master saif%d\n", master_saif->id);

	/* Checking if can playback and capture simutaneously */
	if (master_saif->ongoing && rate != master_saif->cur_rate) {
		dev_err(saif->dev,
			"can not change clock, master saif%d(rate %d) is ongoing\n",
			master_saif->id, master_saif->cur_rate);
		return -EINVAL;
	}

	/* Start from a clean MULT/BASE rate field */
	scr = __raw_readl(master_saif->base + SAIF_CTRL);
	scr &= ~BM_SAIF_CTRL_BITCLK_MULT_RATE;
	scr &= ~BM_SAIF_CTRL_BITCLK_BASE_RATE;

	/*
	 * Set SAIF clock
	 *
	 * The SAIF clock should be either 384*fs or 512*fs.
	 * If MCLK is used, the SAIF clk ratio need to match mclk ratio.
	 *  For 32x mclk, set saif clk as 512*fs.
	 *  For 48x mclk, set saif clk as 384*fs.
	 *
	 * If MCLK is not used, we just set saif clk to 512*fs.
	 */
	clk_prepare_enable(master_saif->clk);

	if (master_saif->mclk_in_use) {
		if (mclk % 32 == 0) {
			scr &= ~BM_SAIF_CTRL_BITCLK_BASE_RATE;
			ret = clk_set_rate(master_saif->clk, 512 * rate);
		} else if (mclk % 48 == 0) {
			scr |= BM_SAIF_CTRL_BITCLK_BASE_RATE;
			ret = clk_set_rate(master_saif->clk, 384 * rate);
		} else {
			/* SAIF MCLK should be either 32x or 48x */
			clk_disable_unprepare(master_saif->clk);
			return -EINVAL;
		}
	} else {
		ret = clk_set_rate(master_saif->clk, 512 * rate);
		scr &= ~BM_SAIF_CTRL_BITCLK_BASE_RATE;
	}

	clk_disable_unprepare(master_saif->clk);

	if (ret)
		return ret;

	master_saif->cur_rate = rate;

	/* Without MCLK there is no MULT rate to program; commit and exit */
	if (!master_saif->mclk_in_use) {
		__raw_writel(scr, master_saif->base + SAIF_CTRL);
		return 0;
	}

	/*
	 * Program the over-sample rate for MCLK output
	 *
	 * The available MCLK range is 32x, 48x... 512x. The rate
	 * could be from 8kHz to 192kH.
	 */
	switch (mclk / rate) {
	case 32:
		scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(4);
		break;
	case 64:
		scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(3);
		break;
	case 128:
		scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(2);
		break;
	case 256:
		scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(1);
		break;
	case 512:
		scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(0);
		break;
	case 48:
		scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(3);
		break;
	case 96:
		scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(2);
		break;
	case 192:
		scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(1);
		break;
	case 384:
		scr |= BF_SAIF_CTRL_BITCLK_MULT_RATE(0);
		break;
	default:
		return -EINVAL;
	}

	__raw_writel(scr, master_saif->base + SAIF_CTRL);

	return 0;
}
/*
 * debugfs-triggered AFE hardware loopback for codec bring-up/testing.
 *
 * loop != 0: bring up the full codec RX and TX paths at 8 kHz mono,
 * 16-bit (SMPS vote, MI2S clocks, LPA subsystem, ADIE codec, AFE),
 * then enable the AFE loopback.
 * loop == 0: tear everything down in the reverse order.
 *
 * NOTE(review): errors from clk/adie/afe calls are only logged, never
 * unwound -- a failed bring-up can leave clocks enabled.  Acceptable
 * only because this is a debug-only path.
 */
static void debugfs_afe_loopback(u32 loop)
{
	int trc;
	struct msm_afe_config afe_config;
	struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;
	struct lpa_codec_config lpa_config;

	if (loop) {
		/* Vote for SMPS mode*/
		pmapp_smps_mode_vote(SMPS_AUDIO_PLAYBACK_ID,
				PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_PWM);

		/* enable MI2S RX master block */
		/* enable MI2S RX bit clock */
		trc = clk_set_rate(drv->rx_mclk,
				SNDDEV_ICODEC_CLK_RATE(8000));
		if (IS_ERR_VALUE(trc))
			MM_ERR("failed to set clk rate\n");
		clk_prepare_enable(drv->rx_mclk);
		clk_prepare_enable(drv->rx_sclk);
		clk_prepare_enable(drv->lpa_p_clk);
		clk_prepare_enable(drv->lpa_codec_clk);
		clk_prepare_enable(drv->lpa_core_clk);
		/* Enable LPA sub system */
		drv->lpa = lpa_get();
		if (!drv->lpa)
			MM_ERR("failed to enable lpa\n");
		/* 8 kHz, 16-bit, mono out through the wideband codec interface */
		lpa_config.sample_rate = 8000;
		lpa_config.sample_width = 16;
		lpa_config.output_interface = LPA_OUTPUT_INTF_WB_CODEC;
		lpa_config.num_channels = 1;
		lpa_cmd_codec_config(drv->lpa, &lpa_config);
		/* Set audio interconnect reg to LPA */
		audio_interct_codec(AUDIO_INTERCT_LPA);
		mi2s_set_codec_output_path(MI2S_CHAN_MONO_PACKED, WT_16_BIT);
		MM_INFO("configure ADIE RX path\n");
		/* Configure ADIE */
		adie_codec_open(&debug_rx_profile, &debugfs_rx_adie);
		adie_codec_setpath(debugfs_rx_adie, 8000, 256);
		lpa_cmd_enable_codec(drv->lpa, 1);

		/* Start AFE for RX */
		afe_config.sample_rate = 0x8;
		afe_config.channel_mode = 1;
		afe_config.volume = AFE_VOLUME_UNITY;
		MM_INFO("enable afe\n");
		trc = afe_enable(AFE_HW_PATH_CODEC_RX, &afe_config);
		if (IS_ERR_VALUE(trc))
			MM_ERR("fail to enable afe RX\n");
		adie_codec_proceed_stage(debugfs_rx_adie,
				ADIE_CODEC_DIGITAL_READY);
		adie_codec_proceed_stage(debugfs_rx_adie,
				ADIE_CODEC_DIGITAL_ANALOG_READY);

		/* Vote for PWM mode*/
		pmapp_smps_mode_vote(SMPS_AUDIO_RECORD_ID,
				PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_PWM);

		MM_INFO("Enable Handset Mic bias\n");
		/* board-specific mic-bias mode selection */
#if defined(CONFIG_MACH_KYLE) || defined(CONFIG_MACH_ROY)
		pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_ALWAYS);
#else
		pmic_hsed_enable(PM_HSED_CONTROLLER_0,
				PM_HSED_ENABLE_PWM_TCXO);
#endif

		/* enable MI2S TX master block */
		/* enable MI2S TX bit clock */
		clk_set_rate(drv->tx_mclk, SNDDEV_ICODEC_CLK_RATE(8000));
		clk_prepare_enable(drv->tx_mclk);
		clk_prepare_enable(drv->tx_sclk);
		/* Set MI2S */
		mi2s_set_codec_input_path(MI2S_CHAN_MONO_PACKED, WT_16_BIT);
		MM_INFO("configure ADIE TX path\n");
		/* Configure ADIE */
		adie_codec_open(&debug_tx_profile, &debugfs_tx_adie);
		adie_codec_setpath(debugfs_tx_adie, 8000, 256);
		adie_codec_proceed_stage(debugfs_tx_adie,
				ADIE_CODEC_DIGITAL_READY);
		adie_codec_proceed_stage(debugfs_tx_adie,
				ADIE_CODEC_DIGITAL_ANALOG_READY);

		/* Start AFE for TX */
		afe_config.sample_rate = 0x8;
		afe_config.channel_mode = 1;
		afe_config.volume = AFE_VOLUME_UNITY;
		trc = afe_enable(AFE_HW_PATH_CODEC_TX, &afe_config);
		if (IS_ERR_VALUE(trc))
			MM_ERR("failed to enable AFE TX\n");
		/* Set the volume level to non unity, to avoid loopback effect */
		afe_device_volume_ctrl(AFE_HW_PATH_CODEC_RX, 0x0500);

		/* enable afe loopback */
		afe_loopback(1);
		MM_INFO("AFE loopback enabled\n");
	} else {
		/* disable afe loopback */
		afe_loopback(0);
		/* Remove the vote for SMPS mode*/
		pmapp_smps_mode_vote(SMPS_AUDIO_PLAYBACK_ID,
				PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_DONTCARE);
		/* Disable ADIE */
		adie_codec_proceed_stage(debugfs_rx_adie,
				ADIE_CODEC_DIGITAL_OFF);
		adie_codec_close(debugfs_rx_adie);
		/* Disable AFE for RX */
		afe_disable(AFE_HW_PATH_CODEC_RX);
		/* Disable LPA Sub system */
		lpa_cmd_enable_codec(drv->lpa, 0);
		lpa_put(drv->lpa);
		/* Disable LPA clocks */
		clk_disable_unprepare(drv->lpa_p_clk);
		clk_disable_unprepare(drv->lpa_codec_clk);
		clk_disable_unprepare(drv->lpa_core_clk);
		/* Disable MI2S RX master block */
		/* Disable MI2S RX bit clock */
		clk_disable_unprepare(drv->rx_sclk);
		clk_disable_unprepare(drv->rx_mclk);
		pmapp_smps_mode_vote(SMPS_AUDIO_RECORD_ID,
				PMAPP_VREG_S4, PMAPP_SMPS_MODE_VOTE_DONTCARE);
		/* Disable AFE for TX */
		afe_disable(AFE_HW_PATH_CODEC_TX);
		/* Disable ADIE */
		adie_codec_proceed_stage(debugfs_tx_adie,
				ADIE_CODEC_DIGITAL_OFF);
		adie_codec_close(debugfs_tx_adie);
		/* Disable MI2S TX master block */
		/* Disable MI2S TX bit clock */
		clk_disable_unprepare(drv->tx_sclk);
		clk_disable_unprepare(drv->tx_mclk);
		/* drop the mic bias enabled in the bring-up path */
		pmic_hsed_enable(PM_HSED_CONTROLLER_0,
				PM_HSED_ENABLE_OFF);
		MM_INFO("AFE loopback disabled\n");
	}
}
/*
 * Probe the ux500 MUSB glue layer: build platform data (from DT if
 * needed), allocate and register the child "musb-hdrc" device, and
 * hand it this controller's register/irq resources and enabled clock.
 *
 * Error labels unwind in reverse acquisition order:
 *   err2 - clock was enabled, disable it
 *   err1 - musb device was allocated, drop the reference
 *   err0 - nothing to undo
 */
static int ux500_probe(struct platform_device *pdev)
{
	struct resource musb_resources[2];
	struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct platform_device *musb;
	struct ux500_glue *glue;
	struct clk *clk;
	/* default error code for the allocation failures below */
	int ret = -ENOMEM;

	if (!pdata) {
		if (np) {
			pdata = ux500_of_probe(pdev, np);
			if (!pdata)
				goto err0;
			pdev->dev.platform_data = pdata;
		} else {
			dev_err(&pdev->dev, "no pdata or device tree found\n");
			goto err0;
		}
	}

	glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
	if (!glue)
		goto err0;

	musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
	if (!musb) {
		dev_err(&pdev->dev, "failed to allocate musb device\n");
		goto err0;
	}

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(clk);
		goto err1;
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto err1;
	}

	/* child inherits this device's DMA configuration */
	musb->dev.parent = &pdev->dev;
	musb->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;

	glue->dev = &pdev->dev;
	glue->musb = musb;
	glue->clk = clk;

	pdata->platform_ops = &ux500_ops;
	pdata->config = &ux500_musb_hdrc_config;

	platform_set_drvdata(pdev, glue);

	/* copy this device's first two resources (mem + irq) to the child */
	memset(musb_resources, 0x00, sizeof(*musb_resources) *
			ARRAY_SIZE(musb_resources));

	musb_resources[0].name = pdev->resource[0].name;
	musb_resources[0].start = pdev->resource[0].start;
	musb_resources[0].end = pdev->resource[0].end;
	musb_resources[0].flags = pdev->resource[0].flags;

	musb_resources[1].name = pdev->resource[1].name;
	musb_resources[1].start = pdev->resource[1].start;
	musb_resources[1].end = pdev->resource[1].end;
	musb_resources[1].flags = pdev->resource[1].flags;

	ret = platform_device_add_resources(musb, musb_resources,
			ARRAY_SIZE(musb_resources));
	if (ret) {
		dev_err(&pdev->dev, "failed to add resources\n");
		goto err2;
	}

	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
	if (ret) {
		dev_err(&pdev->dev, "failed to add platform_data\n");
		goto err2;
	}

	ret = platform_device_add(musb);
	if (ret) {
		dev_err(&pdev->dev, "failed to register musb device\n");
		goto err2;
	}

	return 0;

err2:
	clk_disable_unprepare(clk);

err1:
	platform_device_put(musb);

err0:
	return ret;
}
int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate, int mclk) { int new_baseclock; bool clk_change; int err; switch (srate) { case 11025: case 22050: case 44100: case 88200: if (data->soc == TEGRA_ASOC_UTILS_SOC_TEGRA20) new_baseclock = 56448000; else if (data->soc == TEGRA_ASOC_UTILS_SOC_TEGRA30) new_baseclock = 564480000; else new_baseclock = 282240000; break; case 8000: case 16000: case 32000: case 48000: case 64000: case 96000: if (data->soc == TEGRA_ASOC_UTILS_SOC_TEGRA20) new_baseclock = 73728000; else if (data->soc == TEGRA_ASOC_UTILS_SOC_TEGRA30) new_baseclock = 552960000; else new_baseclock = 368640000; break; default: return -EINVAL; } clk_change = ((new_baseclock != data->set_baseclock) || (mclk != data->set_mclk)); if (!clk_change) return 0; data->set_baseclock = 0; data->set_mclk = 0; clk_disable_unprepare(data->clk_cdev1); clk_disable_unprepare(data->clk_pll_a_out0); clk_disable_unprepare(data->clk_pll_a); err = clk_set_rate(data->clk_pll_a, new_baseclock); if (err) { dev_err(data->dev, "Can't set pll_a rate: %d\n", err); return err; } err = clk_set_rate(data->clk_pll_a_out0, mclk); if (err) { dev_err(data->dev, "Can't set pll_a_out0 rate: %d\n", err); return err; } /* Don't set cdev1/extern1 rate; it's locked to pll_a_out0 */ err = clk_prepare_enable(data->clk_pll_a); if (err) { dev_err(data->dev, "Can't enable pll_a: %d\n", err); return err; } err = clk_prepare_enable(data->clk_pll_a_out0); if (err) { dev_err(data->dev, "Can't enable pll_a_out0: %d\n", err); return err; } err = clk_prepare_enable(data->clk_cdev1); if (err) { dev_err(data->dev, "Can't enable cdev1: %d\n", err); return err; } data->set_baseclock = new_baseclock; data->set_mclk = mclk; return 0; }
/*
 * Probe the i.MX HDMI core MFD device: reset module-level state, take
 * the isfr/iahb clocks, map the register window, and mute all HDMI
 * interrupts.  The clocks are re-disabled before returning; the
 * video/audio sub-drivers enable them again when they initialize.
 *
 * Error labels unwind in reverse acquisition order:
 *   eirq   - release the mem region
 *   emem   - disable iahb_clk
 *   eclke2 - put iahb_clk
 *   eclkg2 - disable isfr_clk
 *   eclke  - put isfr_clk
 *   eclkg  - nothing to undo
 */
static int mxc_hdmi_core_probe(struct platform_device *pdev)
{
	struct mxc_hdmi_data *hdmi_data;
	struct resource *res;
	unsigned long flags;
	int ret = 0;

#ifdef DEBUG
	overflow_lo = false;
	overflow_hi = false;
#endif

	hdmi_core_init = 0;
	hdmi_dma_running = 0;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENOENT;

	ret = hdmi_core_get_of_property(pdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "get hdmi of property fail\n");
		return -ENOENT;
	}

	hdmi_data = devm_kzalloc(&pdev->dev, sizeof(struct mxc_hdmi_data),
				 GFP_KERNEL);
	if (!hdmi_data) {
		dev_err(&pdev->dev, "Couldn't allocate mxc hdmi mfd device\n");
		return -ENOMEM;
	}
	hdmi_data->pdev = pdev;

	/* module-level defaults shared with the sub-drivers */
	pixel_clk = NULL;
	sample_rate = 48000;
	pixel_clk_rate = 0;
	hdmi_ratio = 100;

	spin_lock_init(&irq_spinlock);
	spin_lock_init(&edid_spinlock);

	spin_lock_init(&hdmi_cable_state_lock);
	spin_lock_init(&hdmi_blank_state_lock);
	spin_lock_init(&hdmi_audio_lock);

	/* reset shared state under the locks that normally guard it */
	spin_lock_irqsave(&hdmi_cable_state_lock, flags);
	hdmi_cable_state = 0;
	spin_unlock_irqrestore(&hdmi_cable_state_lock, flags);

	spin_lock_irqsave(&hdmi_blank_state_lock, flags);
	hdmi_blank_state = 0;
	spin_unlock_irqrestore(&hdmi_blank_state_lock, flags);

	spin_lock_irqsave(&hdmi_audio_lock, flags);
	hdmi_audio_stream_playback = NULL;
	hdmi_abort_state = 0;
	spin_unlock_irqrestore(&hdmi_audio_lock, flags);

	isfr_clk = clk_get(&hdmi_data->pdev->dev, "hdmi_isfr");
	if (IS_ERR(isfr_clk)) {
		ret = PTR_ERR(isfr_clk);
		dev_err(&hdmi_data->pdev->dev,
			"Unable to get HDMI isfr clk: %d\n", ret);
		goto eclkg;
	}

	ret = clk_prepare_enable(isfr_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "Cannot enable HDMI clock: %d\n", ret);
		goto eclke;
	}

	pr_debug("%s isfr_clk:%d\n", __func__,
		 (int)clk_get_rate(isfr_clk));

	iahb_clk = clk_get(&hdmi_data->pdev->dev, "hdmi_iahb");
	if (IS_ERR(iahb_clk)) {
		ret = PTR_ERR(iahb_clk);
		dev_err(&hdmi_data->pdev->dev,
			"Unable to get HDMI iahb clk: %d\n", ret);
		goto eclkg2;
	}

	ret = clk_prepare_enable(iahb_clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "Cannot enable HDMI clock: %d\n", ret);
		goto eclke2;
	}

	hdmi_data->reg_phys_base = res->start;
	if (!request_mem_region(res->start, resource_size(res),
				dev_name(&pdev->dev))) {
		dev_err(&pdev->dev, "request_mem_region failed\n");
		ret = -EBUSY;
		goto emem;
	}

	hdmi_data->reg_base = ioremap(res->start, resource_size(res));
	if (!hdmi_data->reg_base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto eirq;
	}
	hdmi_base = hdmi_data->reg_base;

	pr_debug("\n%s hdmi hw base = 0x%08x\n\n", __func__, (int)res->start);

	initialize_hdmi_ih_mutes();

	/* Disable HDMI clocks until video/audio sub-drivers are initialized */
	clk_disable_unprepare(isfr_clk);
	clk_disable_unprepare(iahb_clk);

	/* Replace platform data coming in with a local struct */
	platform_set_drvdata(pdev, hdmi_data);

	return ret;

eirq:
	release_mem_region(res->start, resource_size(res));
emem:
	clk_disable_unprepare(iahb_clk);
eclke2:
	clk_put(iahb_clk);
eclkg2:
	clk_disable_unprepare(isfr_clk);
eclke:
	clk_put(isfr_clk);
eclkg:
	return ret;
}
/*
 * Probe the PXAv3 SDHCI controller: allocate the platform host, take
 * and enable the io clock, apply platform-data quirks/caps (including
 * an optional card-detect GPIO), and register the host with the MMC
 * core.
 *
 * Fix: the original error path for a failed mmc_gpio_request_cd()
 * jumped past the clock and pm_qos cleanup, leaking the enabled
 * "PXA-SDHCLK" clock reference and the qos request.  The labels now
 * unwind in strict reverse order of acquisition.
 */
static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
	struct device *dev = &pdev->dev;
	struct sdhci_host *host = NULL;
	struct sdhci_pxa *pxa = NULL;
	const struct of_device_id *match;
	int ret;
	struct clk *clk;
	int qos_class = PM_QOS_CPUIDLE_BLOCK;

	pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL);
	if (!pxa)
		return -ENOMEM;

	host = sdhci_pltfm_init(pdev, NULL);
	if (IS_ERR(host)) {
		kfree(pxa);
		return PTR_ERR(host);
	}
	pltfm_host = sdhci_priv(host);
	pltfm_host->priv = pxa;

	clk = clk_get(dev, "PXA-SDHCLK");
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get io clock\n");
		ret = PTR_ERR(clk);
		goto err_clk_get;
	}
	pltfm_host->clk = clk;
	clk_prepare_enable(clk);

	host->quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK
		| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
		| SDHCI_QUIRK_32BIT_ADMA_SIZE
		| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;

	/* DT match overrides any platform data passed in */
	match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev);
	if (match)
		pdata = pxav3_get_mmc_pdata(dev);

	if (pdata) {
		pdata->qos_idle.name = mmc_hostname(host->mmc);
		pm_qos_add_request(&pdata->qos_idle, qos_class,
				   PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);

		/* If slot design supports 8 bit data, indicate this to MMC. */
		if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
			host->mmc->caps |= MMC_CAP_8_BIT_DATA;

		if (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING)
			host->mmc->caps2 |= MMC_CAP2_BUS_AUTO_CLK_GATE;
		if (pdata->flags & PXA_FLAG_DISABLE_PROBE_CDSCAN)
			host->mmc->caps2 |= MMC_CAP2_DISABLE_PROBE_CDSCAN;

		if (pdata->quirks)
			host->quirks |= pdata->quirks;
		if (pdata->host_caps)
			host->mmc->caps |= pdata->host_caps;
		if (pdata->host_caps2)
			host->mmc->caps2 |= pdata->host_caps2;
		if (pdata->pm_caps)
			host->mmc->pm_caps |= pdata->pm_caps;

		if (pdata->cd_type != PXA_SDHCI_CD_HOST)
			host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;

		if (pdata->cd_type == PXA_SDHCI_CD_GPIO &&
		    gpio_is_valid(pdata->ext_cd_gpio)) {
			ret = mmc_gpio_request_cd(host->mmc,
						  pdata->ext_cd_gpio);
			if (ret) {
				dev_err(mmc_dev(host->mmc),
					"failed to allocate card detect gpio\n");
				goto err_cd_req;
			}
		} else if (pdata->cd_type == PXA_SDHCI_CD_PERMANENT) {
			/* on-chip device */
			host->mmc->caps |= MMC_CAP_NONREMOVABLE;
		} else if (pdata->cd_type == PXA_SDHCI_CD_NONE)
			host->mmc->caps |= MMC_CAP_NEEDS_POLL;
	}

	host->quirks2 = SDHCI_QUIRK2_NO_CURRENT_LIMIT
		| SDHCI_QUIRK2_PRESET_VALUE_BROKEN
		| SDHCI_QUIRK2_TIMEOUT_DIVIDE_4;
	host->ops = &pxav3_sdhci_ops;

	if (pdata && pdata->flags & PXA_FLAG_EN_PM_RUNTIME) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
		pm_runtime_set_autosuspend_delay(&pdev->dev,
						 PXAV3_RPM_DELAY_MS);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_suspend_ignore_children(&pdev->dev, 1);
	}

#ifdef _MMC_SAFE_ACCESS_
	mmc_is_available = 1;
#endif

	ret = sdhci_add_host(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to add host\n");
		if (pdata && pdata->flags & PXA_FLAG_EN_PM_RUNTIME) {
			pm_runtime_forbid(&pdev->dev);
			pm_runtime_get_noresume(&pdev->dev);
		}
		goto err_add_host;
	}

	/*
	 * remove the caps that supported by the controller but not available
	 * for certain platforms.
	 */
	if (pdata && pdata->host_caps_disable)
		host->mmc->caps &= ~(pdata->host_caps_disable);

	platform_set_drvdata(pdev, host);

	if (pdata && pdata->flags & PXA_FLAG_WAKEUP_HOST) {
		device_init_wakeup(&pdev->dev, 1);
		host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ;
	} else
		device_init_wakeup(&pdev->dev, 0);

#ifdef CONFIG_SD8XXX_RFKILL
	if (pdata && pdata->pmmc)
		*pdata->pmmc = host->mmc;
#endif

	return 0;

err_add_host:
	/* no-op when no card-detect gpio was requested */
	mmc_gpio_free_cd(host->mmc);
err_cd_req:
	/* qos request is only added when pdata exists */
	if (pdata)
		pm_qos_remove_request(&pdata->qos_idle);
	clk_disable_unprepare(clk);
	clk_put(clk);
err_clk_get:
	sdhci_pltfm_free(pdev);
	kfree(pxa);
	return ret;
}
static int imx_keypad_probe(struct platform_device *pdev) { const struct matrix_keymap_data *keymap_data = dev_get_platdata(&pdev->dev); struct imx_keypad *keypad; struct input_dev *input_dev; struct resource *res; int irq, error, i, row, col; if (!keymap_data && !pdev->dev.of_node) { dev_err(&pdev->dev, "no keymap defined\n"); return -EINVAL; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no irq defined in platform data\n"); return -EINVAL; } input_dev = devm_input_allocate_device(&pdev->dev); if (!input_dev) { dev_err(&pdev->dev, "failed to allocate the input device\n"); return -ENOMEM; } keypad = devm_kzalloc(&pdev->dev, sizeof(struct imx_keypad), GFP_KERNEL); if (!keypad) { dev_err(&pdev->dev, "not enough memory for driver data\n"); return -ENOMEM; } keypad->input_dev = input_dev; keypad->irq = irq; keypad->stable_count = 0; setup_timer(&keypad->check_matrix_timer, imx_keypad_check_for_events, (unsigned long) keypad); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(keypad->mmio_base)) return PTR_ERR(keypad->mmio_base); keypad->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(keypad->clk)) { dev_err(&pdev->dev, "failed to get keypad clock\n"); return PTR_ERR(keypad->clk); } /* Init the Input device */ input_dev->name = pdev->name; input_dev->id.bustype = BUS_HOST; input_dev->dev.parent = &pdev->dev; input_dev->open = imx_keypad_open; input_dev->close = imx_keypad_close; error = matrix_keypad_build_keymap(keymap_data, NULL, MAX_MATRIX_KEY_ROWS, MAX_MATRIX_KEY_COLS, keypad->keycodes, input_dev); if (error) { dev_err(&pdev->dev, "failed to build keymap\n"); return error; } /* Search for rows and cols enabled */ for (row = 0; row < MAX_MATRIX_KEY_ROWS; row++) { for (col = 0; col < MAX_MATRIX_KEY_COLS; col++) { i = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT); if (keypad->keycodes[i] != KEY_RESERVED) { keypad->rows_en_mask |= 1 << row; keypad->cols_en_mask |= 1 << 
col; } } } dev_dbg(&pdev->dev, "enabled rows mask: %x\n", keypad->rows_en_mask); dev_dbg(&pdev->dev, "enabled cols mask: %x\n", keypad->cols_en_mask); __set_bit(EV_REP, input_dev->evbit); input_set_capability(input_dev, EV_MSC, MSC_SCAN); input_set_drvdata(input_dev, keypad); /* Ensure that the keypad will stay dormant until opened */ clk_prepare_enable(keypad->clk); imx_keypad_inhibit(keypad); clk_disable_unprepare(keypad->clk); error = devm_request_irq(&pdev->dev, irq, imx_keypad_irq_handler, 0, pdev->name, keypad); if (error) { dev_err(&pdev->dev, "failed to request IRQ\n"); return error; } /* Register the input device */ error = input_register_device(input_dev); if (error) { dev_err(&pdev->dev, "failed to register input device\n"); return error; } platform_set_drvdata(pdev, keypad); device_init_wakeup(&pdev->dev, 1); return 0; }
/** * Power on request. * * Set clocks to ON. * Set sensors chip-select GPIO to non-reset (on) value. * */ static int dsps_power_on_handler(void) { int ret = 0; int i, ci, gi, ri; pr_debug("%s.\n", __func__); if (drv->is_on) { pr_debug("%s: already ON.\n", __func__); return 0; } for (ci = 0; ci < drv->pdata->clks_num; ci++) { const char *name = drv->pdata->clks[ci].name; u32 rate = drv->pdata->clks[ci].rate; struct clk *clock = drv->pdata->clks[ci].clock; if (clock == NULL) continue; if (rate > 0) { ret = clk_set_rate(clock, rate); pr_debug("%s: clk %s set rate %d.", __func__, name, rate); if (ret) { pr_err("%s: clk %s set rate %d. err=%d.", __func__, name, rate, ret); goto clk_err; } } ret = clk_prepare_enable(clock); if (ret) { pr_err("%s: enable clk %s err %d.", __func__, name, ret); goto clk_err; } } for (gi = 0; gi < drv->pdata->gpios_num; gi++) { const char *name = drv->pdata->gpios[gi].name; int num = drv->pdata->gpios[gi].num; int val = drv->pdata->gpios[gi].on_val; int is_owner = drv->pdata->gpios[gi].is_owner; if (!is_owner) continue; ret = gpio_direction_output(num, val); if (ret) { pr_err("%s: set GPIO %s num %d to %d err %d.", __func__, name, num, val, ret); goto gpio_err; } } for (ri = 0; ri < drv->pdata->regs_num; ri++) { const char *name = drv->pdata->regs[ri].name; struct regulator *reg = drv->pdata->regs[ri].reg; int volt = drv->pdata->regs[ri].volt; if (reg == NULL) continue; pr_debug("%s: set regulator %s.", __func__, name); ret = regulator_set_voltage(reg, volt, volt); if (ret) { pr_err("%s: set regulator %s voltage %d err = %d.\n", __func__, name, volt, ret); goto reg_err; } ret = regulator_enable(reg); if (ret) { pr_err("%s: enable regulator %s err = %d.\n", __func__, name, ret); goto reg_err; } } drv->is_on = true; return 0; /* * If failling to set ANY clock/gpio/regulator to ON then we set * them back to OFF to avoid consuming power for unused * clocks/gpios/regulators. 
*/ reg_err: for (i = 0; i < ri; i++) { struct regulator *reg = drv->pdata->regs[ri].reg; if (reg == NULL) continue; regulator_disable(reg); } gpio_err: for (i = 0; i < gi; i++) { int num = drv->pdata->gpios[i].num; int val = drv->pdata->gpios[i].off_val; int is_owner = drv->pdata->gpios[i].is_owner; if (!is_owner) continue; ret = gpio_direction_output(num, val); } clk_err: for (i = 0; i < ci; i++) { struct clk *clock = drv->pdata->clks[i].clock; if (clock == NULL) continue; clk_disable_unprepare(clock); } return -ENODEV; }