/*
 * spear_rtc_probe() - probe for the SPEAr RTC platform device.
 *
 * Grabs the MEM resource and alarm IRQ, maps the registers, enables the
 * RTC clock and registers the RTC class device.  All allocations use
 * devm_*, so only the clock needs explicit unwinding on late failure.
 * Returns 0 on success or a negative errno.
 */
static int spear_rtc_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct spear_rtc_config *config;
	int status = 0;
	int irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no resource defined\n");
		return -EBUSY;
	}

	config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL);
	if (!config) {
		dev_err(&pdev->dev, "out of memory\n");
		return -ENOMEM;
	}

	/* alarm irqs */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no update irq?\n");
		return irq;
	}

	/*
	 * NOTE(review): the handler is registered before config->ioaddr,
	 * config->clk and config->lock are set up below -- confirm
	 * spear_rtc_irq() tolerates a spurious early interrupt.
	 */
	status = devm_request_irq(&pdev->dev, irq, spear_rtc_irq, 0,
			pdev->name, config);
	if (status) {
		dev_err(&pdev->dev, "Alarm interrupt IRQ%d already claimed\n",
			irq);
		return status;
	}

	config->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(config->ioaddr))
		return PTR_ERR(config->ioaddr);

	config->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(config->clk))
		return PTR_ERR(config->clk);

	status = clk_prepare_enable(config->clk);
	if (status < 0)
		return status;

	spin_lock_init(&config->lock);
	platform_set_drvdata(pdev, config);

	config->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
			&spear_rtc_ops, THIS_MODULE);
	if (IS_ERR(config->rtc)) {
		dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
			PTR_ERR(config->rtc));
		status = PTR_ERR(config->rtc);
		goto err_disable_clock;
	}

	/* Update irqs (UIE) are not supported by this hardware. */
	config->rtc->uie_unsupported = 1;

	if (!device_can_wakeup(&pdev->dev))
		device_init_wakeup(&pdev->dev, 1);

	return 0;

err_disable_clock:
	clk_disable_unprepare(config->clk);
	return status;
}
/*
 * tmc_probe() - probe an ARM CoreSight Trace Memory Controller (TMC).
 *
 * Reads the DEVID register to discover whether the block is configured
 * as an ETB, ETR or ETF, fills a coresight_desc accordingly, registers
 * the coresight device and exposes a misc chardev for trace retrieval.
 * Returns 0 on success or a negative errno.
 */
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;
	u32 devid;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct tmc_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc desc = { 0 };
	struct device_node *np = adev->dev.of_node;

	/*
	 * NOTE(review): pdata stays NULL when there is no OF node; the
	 * default: case and the miscdev setup below dereference
	 * pdata->name -- confirm non-DT probing cannot reach them.
	 */
	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata)) {
			ret = PTR_ERR(pdata);
			goto out;
		}
		adev->dev.platform_data = pdata;
	}

	ret = -ENOMEM;
	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		goto out;

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out;
	}

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	/* Configuration type (ETB/ETR/ETF) lives in DEVID bits [7:6]. */
	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
	drvdata->config_type = BMVAL(devid, 6, 7);
	drvdata->memwidth = tmc_get_memwidth(devid);

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		/*
		 * ETR writes to system memory: the buffer size comes from
		 * DT, defaulting to 1 MiB.  Note `ret` is still non-zero
		 * here when !np, so the default branch is taken then too.
		 */
		if (np)
			ret = of_property_read_u32(np,
					"arm,buffer-size",
					&drvdata->size);
		if (ret)
			drvdata->size = SZ_1M;
	} else {
		/* ETB/ETF: internal RAM size, TMC_RSZ is in 32-bit words. */
		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
	}

	pm_runtime_put(&adev->dev);

	desc.pdata = pdata;
	desc.dev = dev;
	desc.groups = coresight_tmc_groups;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
		desc.type = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.ops = &tmc_etb_cs_ops;
		break;
	case TMC_CONFIG_TYPE_ETR:
		desc.type = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.ops = &tmc_etr_cs_ops;
		ret = tmc_etr_setup_caps(drvdata, devid, id->data);
		if (ret)
			goto out;
		break;
	case TMC_CONFIG_TYPE_ETF:
		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
		desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
		desc.ops = &tmc_etf_cs_ops;
		break;
	default:
		pr_err("%s: Unsupported TMC config\n", pdata->name);
		ret = -EINVAL;
		goto out;
	}

	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto out;
	}

	/* Chardev for userspace trace collection; undone if it fails. */
	drvdata->miscdev.name = pdata->name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &tmc_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		coresight_unregister(drvdata->csdev);
out:
	return ret;
}
static int asm9260_rtc_probe(struct platform_device *pdev) { struct asm9260_rtc_priv *priv; struct device *dev = &pdev->dev; struct resource *res; int irq_alarm, ret; u32 ccr; priv = devm_kzalloc(dev, sizeof(struct asm9260_rtc_priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->dev = &pdev->dev; platform_set_drvdata(pdev, priv); irq_alarm = platform_get_irq(pdev, 0); if (irq_alarm < 0) { dev_err(dev, "No alarm IRQ resource defined\n"); return irq_alarm; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); priv->iobase = devm_ioremap_resource(dev, res); if (IS_ERR(priv->iobase)) return PTR_ERR(priv->iobase); priv->clk = devm_clk_get(dev, "ahb"); ret = clk_prepare_enable(priv->clk); if (ret) { dev_err(dev, "Failed to enable clk!\n"); return ret; } ccr = ioread32(priv->iobase + HW_CCR); /* if dev is not enabled, reset it */ if ((ccr & (BM_CLKEN | BM_CTCRST)) != BM_CLKEN) { iowrite32(BM_CTCRST, priv->iobase + HW_CCR); ccr = 0; } iowrite32(BM_CLKEN | ccr, priv->iobase + HW_CCR); iowrite32(0, priv->iobase + HW_CIIR); iowrite32(BM_AMR_OFF, priv->iobase + HW_AMR); priv->rtc = devm_rtc_device_register(dev, dev_name(dev), &asm9260_rtc_ops, THIS_MODULE); if (IS_ERR(priv->rtc)) { ret = PTR_ERR(priv->rtc); dev_err(dev, "Failed to register RTC device: %d\n", ret); goto err_return; } ret = devm_request_threaded_irq(dev, irq_alarm, NULL, asm9260_rtc_irq, IRQF_ONESHOT, dev_name(dev), priv); if (ret < 0) { dev_err(dev, "can't get irq %i, err %d\n", irq_alarm, ret); goto err_return; } return 0; err_return: clk_disable_unprepare(priv->clk); return ret; }
static int stm32_qspi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct spi_controller *ctrl; struct reset_control *rstc; struct stm32_qspi *qspi; struct resource *res; int ret, irq; ctrl = spi_alloc_master(dev, sizeof(*qspi)); if (!ctrl) return -ENOMEM; qspi = spi_controller_get_devdata(ctrl); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi"); qspi->io_base = devm_ioremap_resource(dev, res); if (IS_ERR(qspi->io_base)) return PTR_ERR(qspi->io_base); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm"); qspi->mm_base = devm_ioremap_resource(dev, res); if (IS_ERR(qspi->mm_base)) return PTR_ERR(qspi->mm_base); qspi->mm_size = resource_size(res); if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) return -EINVAL; irq = platform_get_irq(pdev, 0); ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0, dev_name(dev), qspi); if (ret) { dev_err(dev, "failed to request irq\n"); return ret; } init_completion(&qspi->data_completion); qspi->clk = devm_clk_get(dev, NULL); if (IS_ERR(qspi->clk)) return PTR_ERR(qspi->clk); qspi->clk_rate = clk_get_rate(qspi->clk); if (!qspi->clk_rate) return -EINVAL; ret = clk_prepare_enable(qspi->clk); if (ret) { dev_err(dev, "can not enable the clock\n"); return ret; } rstc = devm_reset_control_get_exclusive(dev, NULL); if (!IS_ERR(rstc)) { reset_control_assert(rstc); udelay(2); reset_control_deassert(rstc); } qspi->dev = dev; platform_set_drvdata(pdev, qspi); mutex_init(&qspi->lock); ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; ctrl->setup = stm32_qspi_setup; ctrl->bus_num = -1; ctrl->mem_ops = &stm32_qspi_mem_ops; ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP; ctrl->dev.of_node = dev->of_node; ret = devm_spi_register_master(dev, ctrl); if (ret) goto err_spi_register; return 0; err_spi_register: stm32_qspi_release(qspi); return ret; }
static int st_dwc3_probe(struct platform_device *pdev) { struct st_dwc3 *dwc3_data; struct resource *res; struct device *dev = &pdev->dev; struct device_node *node = dev->of_node, *child; struct regmap *regmap; int ret; dwc3_data = devm_kzalloc(dev, sizeof(*dwc3_data), GFP_KERNEL); if (!dwc3_data) return -ENOMEM; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg-glue"); dwc3_data->glue_base = devm_ioremap_resource(dev, res); if (IS_ERR(dwc3_data->glue_base)) return PTR_ERR(dwc3_data->glue_base); regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg"); if (IS_ERR(regmap)) return PTR_ERR(regmap); dma_set_coherent_mask(dev, dev->coherent_dma_mask); dwc3_data->dev = dev; dwc3_data->regmap = regmap; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "syscfg-reg"); if (!res) { ret = -ENXIO; goto undo_platform_dev_alloc; } dwc3_data->syscfg_reg_off = res->start; dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n", dwc3_data->glue_base, dwc3_data->syscfg_reg_off); dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown"); if (IS_ERR(dwc3_data->rstc_pwrdn)) { dev_err(&pdev->dev, "could not get power controller\n"); ret = PTR_ERR(dwc3_data->rstc_pwrdn); goto undo_platform_dev_alloc; } /* Manage PowerDown */ reset_control_deassert(dwc3_data->rstc_pwrdn); dwc3_data->rstc_rst = devm_reset_control_get(dev, "softreset"); if (IS_ERR(dwc3_data->rstc_rst)) { dev_err(&pdev->dev, "could not get reset controller\n"); ret = PTR_ERR(dwc3_data->rstc_pwrdn); goto undo_powerdown; } /* Manage SoftReset */ reset_control_deassert(dwc3_data->rstc_rst); child = of_get_child_by_name(node, "dwc3"); if (!child) { dev_err(&pdev->dev, "failed to find dwc3 core node\n"); ret = -ENODEV; goto undo_softreset; } dwc3_data->dr_mode = of_usb_get_dr_mode(child); /* Allocate and initialize the core */ ret = of_platform_populate(node, NULL, NULL, dev); if (ret) { dev_err(dev, "failed to add dwc3 core\n"); goto undo_softreset; } /* * Configure the USB port as 
device or host according to the static * configuration passed from DT. * DRD is the only mode currently supported so this will be enhanced * as soon as OTG is available. */ ret = st_dwc3_drd_init(dwc3_data); if (ret) { dev_err(dev, "drd initialisation failed\n"); goto undo_softreset; } /* ST glue logic init */ st_dwc3_init(dwc3_data); platform_set_drvdata(pdev, dwc3_data); return 0; undo_softreset: reset_control_assert(dwc3_data->rstc_rst); undo_powerdown: reset_control_assert(dwc3_data->rstc_pwrdn); undo_platform_dev_alloc: platform_device_put(pdev); return ret; }
/*
 * spi_qup_probe() - probe the Qualcomm QUP SPI controller.
 *
 * Maps registers, enables the core and iface clocks, reads the FIFO and
 * block-size geometry out of QUP_IO_M_MODES, resets the QUP state
 * machine, programs error masks, and registers an auto-runtime-PM SPI
 * master.  Returns 0 on success or a negative errno; clocks and the
 * master allocation are unwound on failure.
 */
static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode;
	int ret, irq, size;

	dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	/* This is optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		return ret;
	}

	master = spi_alloc_master(dev, sizeof(struct spi_qup));
	if (!master) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	/* use num-cs unless not present or out of range */
	if (of_property_read_u16(dev->of_node, "num-cs",
			&master->num_chipselect) ||
			(master->num_chipselect > SPI_NUM_CHIPSELECTS))
		master->num_chipselect = SPI_NUM_CHIPSELECTS;

	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->max_speed_hz = max_freq;
	master->transfer_one = spi_qup_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;

	platform_set_drvdata(pdev, master);

	controller = spi_master_get_devdata(master);
	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->irq = irq;

	/* set v1 flag if device is version 1 */
	if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1"))
		controller->qup_v1 = 1;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

	/*
	 * Block/FIFO geometry is encoded in IO_M_MODES: block sizes are
	 * in 16-byte units (0 means the 4-byte minimum) and the FIFO
	 * depth is a power-of-two multiple of the block size.
	 */
	iomode = readl_relaxed(base + QUP_IO_M_MODES);

	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	/* Full soft reset of the QUP core before programming it. */
	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);

	/* v1 hardware has no OPERATIONAL_MASK register. */
	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* if earlier version of the QUP, disable INPUT_OVERRUN */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
			base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq, IRQF_TRIGGER_HIGH,
			       pdev->name, controller);
	if (ret)
		goto error;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	spi_master_put(master);
	return ret;
}
/*
 * vf610_adc_probe() - probe the Freescale Vybrid VF610 ADC.
 *
 * Allocates an IIO device, maps registers, wires the conversion-done
 * interrupt, enables the vref regulator and ADC clock, initializes the
 * hardware and registers the IIO device.  Returns 0 on success or a
 * negative errno; clock and regulator are unwound on late failure.
 */
static int vf610_adc_probe(struct platform_device *pdev)
{
	struct vf610_adc *info;
	struct iio_dev *indio_dev;
	struct resource *mem;
	int irq;
	int ret;

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct vf610_adc));
	if (!indio_dev) {
		dev_err(&pdev->dev, "Failed allocating iio device\n");
		return -ENOMEM;
	}

	info = iio_priv(indio_dev);
	info->dev = &pdev->dev;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	info->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(info->regs))
		return PTR_ERR(info->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return irq;
	}

	/*
	 * NOTE(review): the ISR is requested before init_completion()
	 * runs below -- confirm vf610_adc_isr() cannot fire (and touch
	 * info->completion) before the hardware is enabled.
	 */
	ret = devm_request_irq(info->dev, irq, vf610_adc_isr, 0,
			       dev_name(&pdev->dev), info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed requesting irq, irq = %d\n", irq);
		return ret;
	}

	info->clk = devm_clk_get(&pdev->dev, "adc");
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "failed getting clock, err = %ld\n",
			PTR_ERR(info->clk));
		return PTR_ERR(info->clk);
	}

	info->vref = devm_regulator_get(&pdev->dev, "vref");
	if (IS_ERR(info->vref))
		return PTR_ERR(info->vref);

	ret = regulator_enable(info->vref);
	if (ret)
		return ret;

	/* Reference voltage, used for raw-to-voltage scaling. */
	info->vref_uv = regulator_get_voltage(info->vref);

	/* Optional DT triple of maximum ADCK rates; absence is tolerated. */
	of_property_read_u32_array(pdev->dev.of_node, "fsl,adck-max-frequency",
			info->max_adck_rate, 3);

	platform_set_drvdata(pdev, indio_dev);

	init_completion(&info->completion);

	indio_dev->name = dev_name(&pdev->dev);
	indio_dev->dev.parent = &pdev->dev;
	indio_dev->dev.of_node = pdev->dev.of_node;
	indio_dev->info = &vf610_adc_iio_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = vf610_adc_iio_channels;
	indio_dev->num_channels = ARRAY_SIZE(vf610_adc_iio_channels);

	ret = clk_prepare_enable(info->clk);
	if (ret) {
		dev_err(&pdev->dev,
			"Could not prepare or enable the clock.\n");
		goto error_adc_clk_enable;
	}

	vf610_adc_cfg_init(info);
	vf610_adc_hw_init(info);

	ret = iio_device_register(indio_dev);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't register the device.\n");
		goto error_iio_device_register;
	}

	return 0;

error_iio_device_register:
	clk_disable_unprepare(info->clk);
error_adc_clk_enable:
	regulator_disable(info->vref);

	return ret;
}
static int qcom_wdt_probe(struct platform_device *pdev) { struct qcom_wdt *wdt; struct resource *res; struct device_node *np = pdev->dev.of_node; const u32 *regs; u32 percpu_offset; int ret; regs = of_device_get_match_data(&pdev->dev); if (!regs) { dev_err(&pdev->dev, "Unsupported QCOM WDT module\n"); return -ENODEV; } wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); if (!wdt) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); /* We use CPU0's DGT for the watchdog */ if (of_property_read_u32(np, "cpu-offset", &percpu_offset)) percpu_offset = 0; res->start += percpu_offset; res->end += percpu_offset; wdt->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(wdt->base)) return PTR_ERR(wdt->base); wdt->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(wdt->clk)) { dev_err(&pdev->dev, "failed to get input clock\n"); return PTR_ERR(wdt->clk); } ret = clk_prepare_enable(wdt->clk); if (ret) { dev_err(&pdev->dev, "failed to setup clock\n"); return ret; } /* * We use the clock rate to calculate the max timeout, so ensure it's * not zero to avoid a divide-by-zero exception. * * WATCHDOG_CORE assumes units of seconds, if the WDT is clocked such * that it would bite before a second elapses it's usefulness is * limited. Bail if this is the case. */ wdt->rate = clk_get_rate(wdt->clk); if (wdt->rate == 0 || wdt->rate > 0x10000000U) { dev_err(&pdev->dev, "invalid clock rate\n"); ret = -EINVAL; goto err_clk_unprepare; } wdt->wdd.info = &qcom_wdt_info; wdt->wdd.ops = &qcom_wdt_ops; wdt->wdd.min_timeout = 1; wdt->wdd.max_timeout = 0x10000000U / wdt->rate; wdt->wdd.parent = &pdev->dev; wdt->layout = regs; if (readl(wdt_addr(wdt, WDT_STS)) & 1) wdt->wdd.bootstatus = WDIOF_CARDRESET; /* * If 'timeout-sec' unspecified in devicetree, assume a 30 second * default, unless the max timeout is less than 30 seconds, then use * the max instead. 
*/ wdt->wdd.timeout = min(wdt->wdd.max_timeout, 30U); watchdog_init_timeout(&wdt->wdd, 0, &pdev->dev); ret = watchdog_register_device(&wdt->wdd); if (ret) { dev_err(&pdev->dev, "failed to register watchdog\n"); goto err_clk_unprepare; } platform_set_drvdata(pdev, wdt); return 0; err_clk_unprepare: clk_disable_unprepare(wdt->clk); return ret; }
static int autcpu12_nvram_probe(struct platform_device *pdev) { map_word tmp, save0, save1; struct resource *res; struct autcpu12_nvram_priv *priv; priv = devm_kzalloc(&pdev->dev, sizeof(struct autcpu12_nvram_priv), GFP_KERNEL); if (!priv) return -ENOMEM; platform_set_drvdata(pdev, priv); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "failed to get memory resource\n"); return -ENOENT; } priv->map.bankwidth = 4; priv->map.phys = res->start; priv->map.size = resource_size(res); priv->map.virt = devm_ioremap_resource(&pdev->dev, res); strcpy((char *)priv->map.name, res->name); if (IS_ERR(priv->map.virt)) return PTR_ERR(priv->map.virt); simple_map_init(&priv->map); /* * Check for 32K/128K * read ofs 0 * read ofs 0x10000 * Write complement to ofs 0x100000 * Read and check result on ofs 0x0 * Restore contents */ save0 = map_read(&priv->map, 0); save1 = map_read(&priv->map, 0x10000); tmp.x[0] = ~save0.x[0]; map_write(&priv->map, tmp, 0x10000); tmp = map_read(&priv->map, 0); /* if we find this pattern on 0x0, we have 32K size */ if (!map_word_equal(&priv->map, tmp, save0)) { map_write(&priv->map, save0, 0x0); priv->map.size = SZ_32K; } else map_write(&priv->map, save1, 0x10000); priv->mtd = do_map_probe("map_ram", &priv->map); if (!priv->mtd) { dev_err(&pdev->dev, "probing failed\n"); return -ENXIO; } priv->mtd->owner = THIS_MODULE; priv->mtd->erasesize = 16; priv->mtd->dev.parent = &pdev->dev; if (!mtd_device_register(priv->mtd, NULL, 0)) { dev_info(&pdev->dev, "NV-RAM device size %ldKiB registered on AUTCPU12\n", priv->map.size / SZ_1K); return 0; } map_destroy(priv->mtd); dev_err(&pdev->dev, "NV-RAM device addition failed\n"); return -ENOMEM; }
/*
 * aspeed_vuart_probe() - probe the Aspeed virtual UART.
 *
 * Maps the VUART registers, creates the sysfs attribute group, resolves
 * the UART clock (either a fixed "clock-frequency" property or a clk
 * handle), fills in a uart_8250_port from DT properties and registers
 * it, then enables the VUART and host-TX-discard.  Returns 0 on success
 * or a negative errno.
 */
static int aspeed_vuart_probe(struct platform_device *pdev)
{
	struct uart_8250_port port;
	struct aspeed_vuart *vuart;
	struct device_node *np;
	struct resource *res;
	u32 clk, prop;
	int rc;

	np = pdev->dev.of_node;

	vuart = devm_kzalloc(&pdev->dev, sizeof(*vuart), GFP_KERNEL);
	if (!vuart)
		return -ENOMEM;

	vuart->dev = &pdev->dev;
	timer_setup(&vuart->unthrottle_timer, aspeed_vuart_unthrottle_exp, 0);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	vuart->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(vuart->regs))
		return PTR_ERR(vuart->regs);

	memset(&port, 0, sizeof(port));
	port.port.private_data = vuart;
	port.port.membase = vuart->regs;
	port.port.mapbase = res->start;
	port.port.mapsize = resource_size(res);
	port.port.startup = aspeed_vuart_startup;
	port.port.shutdown = aspeed_vuart_shutdown;
	port.port.throttle = aspeed_vuart_throttle;
	port.port.unthrottle = aspeed_vuart_unthrottle;
	port.port.status = UPSTAT_SYNC_FIFO;
	port.port.dev = &pdev->dev;

	rc = sysfs_create_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
	if (rc < 0)
		return rc;

	/*
	 * Fixed "clock-frequency" wins; otherwise fetch and enable a clk
	 * and use its rate.  When the property exists, vuart->clk stays
	 * NULL and clk_disable_unprepare() below is a harmless no-op.
	 */
	if (of_property_read_u32(np, "clock-frequency", &clk)) {
		vuart->clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(vuart->clk)) {
			dev_warn(&pdev->dev,
				"clk or clock-frequency not defined\n");
			rc = PTR_ERR(vuart->clk);
			goto err_sysfs_remove;
		}

		rc = clk_prepare_enable(vuart->clk);
		if (rc < 0)
			goto err_sysfs_remove;

		clk = clk_get_rate(vuart->clk);
	}

	/* If current-speed was set, then try not to change it. */
	if (of_property_read_u32(np, "current-speed", &prop) == 0)
		port.port.custom_divisor = clk / (16 * prop);

	/* Check for shifted address mapping */
	if (of_property_read_u32(np, "reg-offset", &prop) == 0)
		port.port.mapbase += prop;

	/* Check for registers offset within the devices address range */
	if (of_property_read_u32(np, "reg-shift", &prop) == 0)
		port.port.regshift = prop;

	/* Check for fifo size */
	if (of_property_read_u32(np, "fifo-size", &prop) == 0)
		port.port.fifosize = prop;

	/* Check for a fixed line number */
	rc = of_alias_get_id(np, "serial");
	if (rc >= 0)
		port.port.line = rc;

	port.port.irq = irq_of_parse_and_map(np, 0);
	port.port.irqflags = IRQF_SHARED;
	port.port.handle_irq = aspeed_vuart_handle_irq;
	port.port.iotype = UPIO_MEM;
	port.port.type = PORT_16550A;
	port.port.uartclk = clk;
	port.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT
		| UPF_FIXED_TYPE | UPF_NO_THRE_TEST;

	if (of_property_read_bool(np, "no-loopback-test"))
		port.port.flags |= UPF_SKIP_TEST;

	if (port.port.fifosize)
		port.capabilities = UART_CAP_FIFO;

	if (of_property_read_bool(np, "auto-flow-control"))
		port.capabilities |= UART_CAP_AFE;

	rc = serial8250_register_8250_port(&port);
	if (rc < 0)
		goto err_clk_disable;

	/* rc is the allocated 8250 line number on success. */
	vuart->line = rc;

	aspeed_vuart_set_enabled(vuart, true);
	aspeed_vuart_set_host_tx_discard(vuart, true);
	platform_set_drvdata(pdev, vuart);

	return 0;

err_clk_disable:
	clk_disable_unprepare(vuart->clk);
	irq_dispose_mapping(port.port.irq);
err_sysfs_remove:
	sysfs_remove_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
	return rc;
}
static int ltq_mtd_probe(struct platform_device *pdev) { struct mtd_part_parser_data ppdata; struct ltq_mtd *ltq_mtd; struct cfi_private *cfi; int err; if (of_machine_is_compatible("lantiq,falcon") && (ltq_boot_select() != BS_FLASH)) { dev_err(&pdev->dev, "invalid bootstrap options\n"); return -ENODEV; } ltq_mtd = devm_kzalloc(&pdev->dev, sizeof(struct ltq_mtd), GFP_KERNEL); if (!ltq_mtd) return -ENOMEM; platform_set_drvdata(pdev, ltq_mtd); ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!ltq_mtd->res) { dev_err(&pdev->dev, "failed to get memory resource\n"); return -ENOENT; } ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info), GFP_KERNEL); if (!ltq_mtd->map) return -ENOMEM; ltq_mtd->map->phys = ltq_mtd->res->start; ltq_mtd->map->size = resource_size(ltq_mtd->res); ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res); if (IS_ERR(ltq_mtd->map->virt)) return PTR_ERR(ltq_mtd->map->virt); ltq_mtd->map->name = ltq_map_name; ltq_mtd->map->bankwidth = 2; ltq_mtd->map->read = ltq_read16; ltq_mtd->map->write = ltq_write16; ltq_mtd->map->copy_from = ltq_copy_from; ltq_mtd->map->copy_to = ltq_copy_to; ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING; ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map); ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL; if (!ltq_mtd->mtd) { dev_err(&pdev->dev, "probing failed\n"); return -ENXIO; } ltq_mtd->mtd->owner = THIS_MODULE; cfi = ltq_mtd->map->fldrv_priv; cfi->addr_unlock1 ^= 1; cfi->addr_unlock2 ^= 1; ppdata.of_node = pdev->dev.of_node; err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, &ppdata, NULL, 0); if (err) { dev_err(&pdev->dev, "failed to add partitions\n"); goto err_destroy; } return 0; err_destroy: map_destroy(ltq_mtd->mtd); return err; }
/** * cdns_spi_probe - Probe method for the SPI driver * @pdev: Pointer to the platform_device structure * * This function initializes the driver data structures and the hardware. * * Return: 0 on success and error value on error */ static int cdns_spi_probe(struct platform_device *pdev) { int ret = 0, irq; struct spi_master *master; struct cdns_spi *xspi; struct resource *res; u32 num_cs; master = spi_alloc_master(&pdev->dev, sizeof(*xspi)); if (master == NULL) return -ENOMEM; xspi = spi_master_get_devdata(master); master->dev.of_node = pdev->dev.of_node; platform_set_drvdata(pdev, master); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); xspi->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(xspi->regs)) { ret = PTR_ERR(xspi->regs); goto remove_master; } xspi->pclk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR(xspi->pclk)) { dev_err(&pdev->dev, "pclk clock not found.\n"); ret = PTR_ERR(xspi->pclk); goto remove_master; } xspi->ref_clk = devm_clk_get(&pdev->dev, "ref_clk"); if (IS_ERR(xspi->ref_clk)) { dev_err(&pdev->dev, "ref_clk clock not found.\n"); ret = PTR_ERR(xspi->ref_clk); goto remove_master; } ret = clk_prepare_enable(xspi->pclk); if (ret) { dev_err(&pdev->dev, "Unable to enable APB clock.\n"); goto remove_master; } ret = clk_prepare_enable(xspi->ref_clk); if (ret) { dev_err(&pdev->dev, "Unable to enable device clock.\n"); goto clk_dis_apb; } /* SPI controller initializations */ cdns_spi_init_hw(xspi); irq = platform_get_irq(pdev, 0); if (irq <= 0) { ret = -ENXIO; dev_err(&pdev->dev, "irq number is invalid\n"); goto remove_master; } ret = devm_request_irq(&pdev->dev, irq, cdns_spi_irq, 0, pdev->name, master); if (ret != 0) { ret = -ENXIO; dev_err(&pdev->dev, "request_irq failed\n"); goto remove_master; } ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs); if (ret < 0) master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS; else master->num_chipselect = num_cs; ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs", 
&xspi->is_decoded_cs); if (ret < 0) xspi->is_decoded_cs = 0; master->prepare_transfer_hardware = cdns_prepare_transfer_hardware; master->prepare_message = cdns_prepare_message; master->transfer_one = cdns_transfer_one; master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware; master->set_cs = cdns_spi_chipselect; master->mode_bits = SPI_CPOL | SPI_CPHA; /* Set to default valid value */ master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4; xspi->speed_hz = master->max_speed_hz; master->bits_per_word_mask = SPI_BPW_MASK(8); ret = spi_register_master(master); if (ret) { dev_err(&pdev->dev, "spi_register_master failed\n"); goto clk_dis_all; } return ret; clk_dis_all: clk_disable_unprepare(xspi->ref_clk); clk_dis_apb: clk_disable_unprepare(xspi->pclk); remove_master: spi_master_put(master); return ret; }
static int omap_control_usb_probe(struct platform_device *pdev) { struct resource *res; struct device_node *np = pdev->dev.of_node; struct omap_control_usb_platform_data *pdata = dev_get_platdata(&pdev->dev); control_usb = devm_kzalloc(&pdev->dev, sizeof(*control_usb), GFP_KERNEL); if (!control_usb) { dev_err(&pdev->dev, "unable to alloc memory for control usb\n"); return -ENOMEM; } if (np) { of_property_read_u32(np, "ti,type", &control_usb->type); } else if (pdata) { control_usb->type = pdata->type; } else { dev_err(&pdev->dev, "no pdata present\n"); return -EINVAL; } control_usb->dev = &pdev->dev; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control_dev_conf"); control_usb->dev_conf = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(control_usb->dev_conf)) return PTR_ERR(control_usb->dev_conf); if (control_usb->type == OMAP_CTRL_DEV_TYPE1) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otghs_control"); control_usb->otghs_control = devm_ioremap_resource( &pdev->dev, res); if (IS_ERR(control_usb->otghs_control)) return PTR_ERR(control_usb->otghs_control); } if (control_usb->type == OMAP_CTRL_DEV_TYPE2) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_power_usb"); control_usb->phy_power = devm_ioremap_resource( &pdev->dev, res); if (IS_ERR(control_usb->phy_power)) return PTR_ERR(control_usb->phy_power); control_usb->sys_clk = devm_clk_get(control_usb->dev, "sys_clkin"); if (IS_ERR(control_usb->sys_clk)) { pr_err("%s: unable to get sys_clkin\n", __func__); return -EINVAL; } } dev_set_drvdata(control_usb->dev, control_usb); return 0; }
static int bbif_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct resource *mem_res; int ret; int i; void __iomem *bbif_misc_base; pr_debug("%s: Entry\n", __func__); bbif_regulator_init(pdev); mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); bbif_base = devm_ioremap_resource(&pdev->dev, mem_res); if (IS_ERR(bbif_base)) return PTR_ERR(bbif_base); ret = misc_register(bbif_misc_dev); if (ret < 0) { misc_deregister(bbif_misc_dev); return ret; } /*ADC config */ bbif_misc_base = bbif_base + BBIF_MISC; __raw_writel(SIGMA_DELTA_VAL_0, bbif_misc_base + BBIF_MAP_SIGMA_DELTA_0); __raw_writel(SIGMA_DELTA_VAL_1, bbif_misc_base + BBIF_MAP_SIGMA_DELTA_1); __raw_writel(SIGMA_DELTA_VAL_2, bbif_misc_base + BBIF_MAP_SIGMA_DELTA_2); __raw_writel(SIGMA_DELTA_VAL_3, bbif_misc_base + BBIF_MAP_SIGMA_DELTA_3); __raw_writel(0, bbif_misc_base + BBIF_PRI_MODE); /* If the values are different make sure i=1 & i=2 are reversed */ for (i = 0; i < 6; i++) { __raw_writel(0, bbif_misc_base + BBIF_ADC_CFG + i*4); __raw_writel(BBIF_BBRX_TEST1, bbif_misc_base + BBIF_BBRX_TEST1_BASE + i*4); __raw_writel(BBIF_BBRX_TEST2, bbif_misc_base + BBIF_BBRX_TEST2_BASE + i*4); __raw_writel(BBIF_BBRX_TEST3, bbif_misc_base + BBIF_BBRX_TEST3_BASE + i*4); __raw_writel(BBIF_CONFIG_LTE_20_DPD, bbif_misc_base + BBIF_BBRX_CONFIG_BASE + i*4); __raw_writel(0, bbif_misc_base + BBIF_BBRX_CONTROL_BASE + i*4); usleep(100000); __raw_writel(7, bbif_misc_base + BBIF_BBRX_CONTROL_BASE + i*4); } /* DAC config */ set_combodac_cfg(0); reset_msbcal(); reset_dccal(); set_combodac_cfg(2); return of_platform_populate(np, NULL, NULL, &pdev->dev); }
/*
 * nvadsp_probe - bring up the NVIDIA ADSP: map all APE register apertures,
 * record the DRAM mapping resources, then probe the OS/mailbox/DFS/actmon
 * sub-modules under a runtime-PM reference.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fixes:
 *  - "#if CONFIG_DEBUG_FS" -> "#ifdef CONFIG_DEBUG_FS" (the macro is
 *    defined-or-not, not 0/1; "#if" on an undefined macro never fired).
 *  - pm_runtime_get_sync() may return 1 when the device was already
 *    active; test for "< 0" instead of non-zero.
 *  - the err: path clobbered 'ret' with pm_runtime_put_sync()'s result,
 *    so a failed probe could return 0.
 */
static int __init nvadsp_probe(struct platform_device *pdev)
{
	struct nvadsp_drv_data *drv_data;
	struct device *dev = &pdev->dev;
	struct resource *res = NULL;
	void __iomem *base = NULL;
	uint32_t aram_addr;
	uint32_t aram_size;
	int dram_iter;
	int ret = 0;
	int iter;

	dev_info(dev, "in probe()...\n");

	drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
	if (!drv_data) {
		dev_err(&pdev->dev, "Failed to allocate driver data");
		ret = -ENOMEM;
		goto out;
	}

	platform_set_drvdata(pdev, drv_data);

	ret = nvadsp_parse_dt(pdev);
	if (ret)
		goto out;

#ifdef CONFIG_DEBUG_FS
	/* debugfs is best-effort: log and carry on if it fails */
	if (adsp_debug_init(drv_data))
		dev_err(dev, "unable to create tegra_ape debug fs directory\n");
#endif

	drv_data->base_regs = devm_kzalloc(dev, sizeof(void *) * APE_MAX_REG,
					   GFP_KERNEL);
	if (!drv_data->base_regs) {
		dev_err(dev, "Failed to allocate regs");
		ret = -ENOMEM;
		goto out;
	}

	/* Map every APE register aperture and register it for load mapping. */
	for (iter = 0; iter < APE_MAX_REG; iter++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, iter);
		if (!res) {
			dev_err(dev, "Failed to get resource with ID %d\n",
				iter);
			ret = -EINVAL;
			goto out;
		}

		/* UNIT_FPGA_RST only exists on unit-FPGA configurations */
		if (!drv_data->adsp_unit_fpga && iter == UNIT_FPGA_RST)
			continue;

		base = devm_ioremap_resource(dev, res);
		if (IS_ERR(base)) {
			dev_err(dev, "Failed to iomap resource reg[%d]\n",
				iter);
			ret = PTR_ERR(base);
			goto out;
		}
		drv_data->base_regs[iter] = base;

		adsp_add_load_mappings(res->start, base, resource_size(res));
	}

	drv_data->base_regs_saved = drv_data->base_regs;

	/* DRAM map resources follow the register apertures ('iter' carries on). */
	for (dram_iter = 0; dram_iter < ADSP_MAX_DRAM_MAP; dram_iter++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, iter++);
		if (!res) {
			dev_err(dev, "Failed to get DRAM map with ID %d\n",
				iter);
			ret = -EINVAL;
			goto out;
		}
		drv_data->dram_region[dram_iter] = res;
	}

	nvadsp_drv_data = drv_data;

#ifdef CONFIG_PM_RUNTIME
	tegra_adsp_pd_add_device(dev);
	pm_runtime_enable(dev);
	/* get_sync() returns 1 if the device was already active: not an error */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto out;
	ret = 0;
#endif
	ret = nvadsp_os_probe(pdev);
	if (ret)
		goto err;

	ret = nvadsp_amc_init(pdev);
	if (ret)
		goto err;

	ret = nvadsp_hwmbox_init(pdev);
	if (ret)
		goto err;

	ret = nvadsp_mbox_init(pdev);
	if (ret)
		goto err;

#ifdef CONFIG_TEGRA_EMC_APE_DFS
	ret = emc_dfs_init(pdev);
	if (ret)
		goto err;
#endif
#ifdef CONFIG_TEGRA_ADSP_ACTMON
	ret = ape_actmon_probe(pdev);
	if (ret)
		goto err;
#endif
	ret = nvadsp_app_module_probe(pdev);
	if (ret)
		goto err;

	aram_addr = drv_data->adsp_mem[ARAM_ALIAS_0_ADDR];
	aram_size = drv_data->adsp_mem[ARAM_ALIAS_0_SIZE];
	ret = aram_init(aram_addr, aram_size);
	if (ret) {
		dev_err(dev, "Failed to init aram\n");
		/* ARAM failure was effectively non-fatal before; keep that. */
		ret = 0;
	}

err:
#ifdef CONFIG_PM_RUNTIME
	/*
	 * Balance pm_runtime_get_sync() above.  Use a separate local so the
	 * put_sync() result cannot clobber the probe status in 'ret'.
	 */
	{
		int err = pm_runtime_put_sync(dev);

		if (err)
			dev_err(dev, "pm_runtime_put_sync failed\n");
	}
#endif
out:
	return ret;
}
static int sh_pfc_map_resources(struct sh_pfc *pfc, struct platform_device *pdev) { unsigned int num_windows = 0; unsigned int num_irqs = 0; struct sh_pfc_window *windows; unsigned int *irqs = NULL; struct resource *res; unsigned int i; /* Count the MEM and IRQ resources. */ for (i = 0; i < pdev->num_resources; ++i) { switch (resource_type(&pdev->resource[i])) { case IORESOURCE_MEM: num_windows++; break; case IORESOURCE_IRQ: num_irqs++; break; } } if (num_windows == 0) return -EINVAL; /* Allocate memory windows and IRQs arrays. */ windows = devm_kzalloc(pfc->dev, num_windows * sizeof(*windows), GFP_KERNEL); if (windows == NULL) return -ENOMEM; pfc->num_windows = num_windows; pfc->windows = windows; if (num_irqs) { irqs = devm_kzalloc(pfc->dev, num_irqs * sizeof(*irqs), GFP_KERNEL); if (irqs == NULL) return -ENOMEM; pfc->num_irqs = num_irqs; pfc->irqs = irqs; } /* Fill them. */ for (i = 0, res = pdev->resource; i < pdev->num_resources; i++, res++) { switch (resource_type(res)) { case IORESOURCE_MEM: windows->phys = res->start; windows->size = resource_size(res); windows->virt = devm_ioremap_resource(pfc->dev, res); if (IS_ERR(windows->virt)) return -ENOMEM; windows++; break; case IORESOURCE_IRQ: *irqs++ = res->start; break; } } return 0; }
static int sun6i_spi_probe(struct platform_device *pdev) { struct spi_master *master; struct sun6i_spi *sspi; struct resource *res; int ret = 0, irq; master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi)); if (!master) { dev_err(&pdev->dev, "Unable to allocate SPI Master\n"); return -ENOMEM; } platform_set_drvdata(pdev, master); sspi = spi_master_get_devdata(master); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); sspi->base_addr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(sspi->base_addr)) { ret = PTR_ERR(sspi->base_addr); goto err_free_master; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "No spi IRQ specified\n"); ret = -ENXIO; goto err_free_master; } ret = devm_request_irq(&pdev->dev, irq, sun6i_spi_handler, 0, "sun6i-spi", sspi); if (ret) { dev_err(&pdev->dev, "Cannot request IRQ\n"); goto err_free_master; } sspi->master = master; sspi->fifo_depth = (unsigned long)of_device_get_match_data(&pdev->dev); master->max_speed_hz = 100 * 1000 * 1000; master->min_speed_hz = 3 * 1000; master->set_cs = sun6i_spi_set_cs; master->transfer_one = sun6i_spi_transfer_one; master->num_chipselect = 4; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; master->bits_per_word_mask = SPI_BPW_MASK(8); master->dev.of_node = pdev->dev.of_node; master->auto_runtime_pm = true; master->max_transfer_size = sun6i_spi_max_transfer_size; sspi->hclk = devm_clk_get(&pdev->dev, "ahb"); if (IS_ERR(sspi->hclk)) { dev_err(&pdev->dev, "Unable to acquire AHB clock\n"); ret = PTR_ERR(sspi->hclk); goto err_free_master; } sspi->mclk = devm_clk_get(&pdev->dev, "mod"); if (IS_ERR(sspi->mclk)) { dev_err(&pdev->dev, "Unable to acquire module clock\n"); ret = PTR_ERR(sspi->mclk); goto err_free_master; } init_completion(&sspi->done); sspi->rstc = devm_reset_control_get(&pdev->dev, NULL); if (IS_ERR(sspi->rstc)) { dev_err(&pdev->dev, "Couldn't get reset controller\n"); ret = PTR_ERR(sspi->rstc); goto err_free_master; } /* * This 
wake-up/shutdown pattern is to be able to have the * device woken up, even if runtime_pm is disabled */ ret = sun6i_spi_runtime_resume(&pdev->dev); if (ret) { dev_err(&pdev->dev, "Couldn't resume the device\n"); goto err_free_master; } pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); pm_runtime_idle(&pdev->dev); ret = devm_spi_register_master(&pdev->dev, master); if (ret) { dev_err(&pdev->dev, "cannot register SPI master\n"); goto err_pm_disable; } return 0; err_pm_disable: pm_runtime_disable(&pdev->dev); sun6i_spi_runtime_suspend(&pdev->dev); err_free_master: spi_master_put(master); return ret; }
/*
 * stm32_ipcc_probe - set up the STM32 inter-processor communication
 * controller: map registers, enable the clock, request the rx/tx
 * (and optional wakeup) interrupts, and register a mailbox controller
 * with one channel per hardware channel.
 *
 * The clock is only needed while touching registers: it is re-disabled
 * on the success path at the end of probe.
 *
 * Returns 0 on success or a negative errno.
 */
static int stm32_ipcc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct stm32_ipcc *ipcc;
	struct resource *res;
	unsigned int i;
	int ret;
	u32 ip_ver;
	static const char * const irq_name[] = {"rx", "tx"};
	irq_handler_t irq_thread[] = {stm32_ipcc_rx_irq, stm32_ipcc_tx_irq};

	if (!np) {
		dev_err(dev, "No DT found\n");
		return -ENODEV;
	}

	ipcc = devm_kzalloc(dev, sizeof(*ipcc), GFP_KERNEL);
	if (!ipcc)
		return -ENOMEM;

	/* proc_id: selects which processor-side register bank we drive */
	if (of_property_read_u32(np, "st,proc-id", &ipcc->proc_id)) {
		dev_err(dev, "Missing st,proc-id\n");
		return -ENODEV;
	}

	if (ipcc->proc_id >= STM32_MAX_PROCS) {
		dev_err(dev, "Invalid proc_id (%d)\n", ipcc->proc_id);
		return -EINVAL;
	}

	/* regs */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ipcc->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ipcc->reg_base))
		return PTR_ERR(ipcc->reg_base);

	/* per-processor register bank within the shared block */
	ipcc->reg_proc = ipcc->reg_base + ipcc->proc_id * IPCC_PROC_OFFST;

	/* clock */
	ipcc->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(ipcc->clk))
		return PTR_ERR(ipcc->clk);

	ret = clk_prepare_enable(ipcc->clk);
	if (ret) {
		dev_err(dev, "can not enable the clock\n");
		return ret;
	}

	/* irq: one threaded handler each for rx and tx */
	for (i = 0; i < IPCC_IRQ_NUM; i++) {
		ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]);
		if (ipcc->irqs[i] < 0) {
			/* stay quiet on probe deferral */
			if (ipcc->irqs[i] != -EPROBE_DEFER)
				dev_err(dev, "no IRQ specified %s\n",
					irq_name[i]);
			ret = ipcc->irqs[i];
			goto err_clk;
		}

		ret = devm_request_threaded_irq(dev, ipcc->irqs[i], NULL,
						irq_thread[i], IRQF_ONESHOT,
						dev_name(dev), ipcc);
		if (ret) {
			dev_err(dev, "failed to request irq %d (%d)\n",
				i, ret);
			goto err_clk;
		}
	}

	/* mask and enable rx/tx irq */
	stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XMR,
			    RX_BIT_MASK | TX_BIT_MASK);
	stm32_ipcc_set_bits(ipcc->reg_proc + IPCC_XCR, XCR_RXOIE | XCR_TXOIE);

	/* wakeup: optional dedicated wake IRQ, gated on "wakeup-source" */
	if (of_property_read_bool(np, "wakeup-source")) {
		ipcc->wkp = platform_get_irq_byname(pdev, "wakeup");
		if (ipcc->wkp < 0) {
			if (ipcc->wkp != -EPROBE_DEFER)
				dev_err(dev, "could not get wakeup IRQ\n");
			ret = ipcc->wkp;
			goto err_clk;
		}

		device_set_wakeup_capable(dev, true);
		ret = dev_pm_set_dedicated_wake_irq(dev, ipcc->wkp);
		if (ret) {
			dev_err(dev, "Failed to set wake up irq\n");
			goto err_init_wkp;
		}
	}

	/* mailbox controller: channel count comes from the HW config register */
	ipcc->n_chans = readl_relaxed(ipcc->reg_base + IPCC_HWCFGR);
	ipcc->n_chans &= IPCFGR_CHAN_MASK;

	ipcc->controller.dev = dev;
	ipcc->controller.txdone_irq = true;
	ipcc->controller.ops = &stm32_ipcc_ops;
	ipcc->controller.num_chans = ipcc->n_chans;
	ipcc->controller.chans = devm_kcalloc(dev, ipcc->controller.num_chans,
					      sizeof(*ipcc->controller.chans),
					      GFP_KERNEL);
	if (!ipcc->controller.chans) {
		ret = -ENOMEM;
		goto err_irq_wkp;
	}

	/* con_priv carries the channel index, not a pointer */
	for (i = 0; i < ipcc->controller.num_chans; i++)
		ipcc->controller.chans[i].con_priv = (void *)i;

	ret = devm_mbox_controller_register(dev, &ipcc->controller);
	if (ret)
		goto err_irq_wkp;

	platform_set_drvdata(pdev, ipcc);

	ip_ver = readl_relaxed(ipcc->reg_base + IPCC_VER);

	dev_info(dev, "ipcc rev:%ld.%ld enabled, %d chans, proc %d\n",
		 FIELD_GET(VER_MAJREV_MASK, ip_ver),
		 FIELD_GET(VER_MINREV_MASK, ip_ver),
		 ipcc->controller.num_chans, ipcc->proc_id);

	/* register access is done; the clock is re-enabled on demand */
	clk_disable_unprepare(ipcc->clk);

	return 0;

err_irq_wkp:
	/* only clear the wake irq if one was actually configured */
	if (ipcc->wkp)
		dev_pm_clear_wake_irq(dev);
err_init_wkp:
	device_init_wakeup(dev, false);
err_clk:
	clk_disable_unprepare(ipcc->clk);
	return ret;
}
/**
 * ehci_hcd_omap_probe - initialize TI-based HCDs
 *
 * Allocates basic resources for this USB host controller, and
 * then invokes the start() method for the HCD associated with it
 * through the hotplug entry's driver_data.
 *
 * Platform data comes either directly (legacy boot) or from the parent
 * usbhshost device (DT boot).  Per-port PHYs are fetched and, for
 * PHY-mode ports, initialized before the HCD is added; non-PHY-mode
 * ports get their (reset-line) PHYs brought up after usb_add_hcd().
 *
 * Returns 0 on success or a negative errno.
 */
static int ehci_hcd_omap_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct usbhs_omap_platform_data *pdata = dev->platform_data;
	struct resource	*res;
	struct usb_hcd	*hcd;
	void __iomem *regs;
	int ret = -ENODEV;
	int irq;
	int i;
	struct omap_hcd	*omap;

	if (usb_disabled())
		return -ENODEV;

	if (!dev->parent) {
		dev_err(dev, "Missing parent device\n");
		return -ENODEV;
	}

	/* For DT boot, get platform data from parent. i.e. usbhshost */
	if (dev->of_node) {
		pdata = dev->parent->platform_data;
		dev->platform_data = pdata;
	}

	if (!pdata) {
		dev_err(dev, "Missing platform data\n");
		return -ENODEV;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "EHCI irq failed\n");
		return -ENODEV;
	}

	res =  platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/*
	 * Right now device-tree probed devices don't get dma_mask set.
	 * Since shared usb code relies on it, set it here for now.
	 * Once we have dma capability bindings this can go away.
	 */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = DMA_BIT_MASK(32);

	hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
			dev_name(dev));
	if (!hcd) {
		dev_err(dev, "Failed to create HCD\n");
		return -ENOMEM;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	hcd->regs = regs;
	hcd_to_ehci(hcd)->caps = regs;

	/* omap_hcd state lives in the ehci's HCD-private area */
	omap = (struct omap_hcd *)hcd_to_ehci(hcd)->priv;
	omap->nports = pdata->nports;

	platform_set_drvdata(pdev, hcd);

	/* get the PHY devices if needed */
	for (i = 0 ; i < omap->nports ; i++) {
		struct usb_phy *phy;

		/* get the PHY device */
		if (dev->of_node)
			phy = devm_usb_get_phy_by_phandle(dev, "phys", i);
		else
			phy = devm_usb_get_phy_dev(dev, i);
		if (IS_ERR(phy)) {
			/* Don't bail out if PHY is not absolutely necessary */
			if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY)
				continue;

			ret = PTR_ERR(phy);
			dev_err(dev, "Can't get PHY device for port %d: %d\n",
					i, ret);
			goto err_phy;
		}

		omap->phy[i] = phy;

		if (pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY) {
			usb_phy_init(omap->phy[i]);
			/* bring PHY out of suspend */
			usb_phy_set_suspend(omap->phy[i], 0);
		}
	}

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	/*
	 * An undocumented "feature" in the OMAP3 EHCI controller,
	 * causes suspended ports to be taken out of suspend when
	 * the USBCMD.Run/Stop bit is cleared (for example when
	 * we do ehci_bus_suspend).
	 * This breaks suspend-resume if the root-hub is allowed
	 * to suspend. Writing 1 to this undocumented register bit
	 * disables this feature and restores normal behavior.
	 */
	ehci_write(regs, EHCI_INSNREG04,
				EHCI_INSNREG04_DISABLE_UNSUSPEND);

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret) {
		dev_err(dev, "failed to add hcd with err %d\n", ret);
		goto err_pm_runtime;
	}

	/*
	 * Bring PHYs out of reset for non PHY modes.
	 * Even though HSIC mode is a PHY-less mode, the reset
	 * line exists between the chips and can be modelled
	 * as a PHY device for reset control.
	 */
	for (i = 0; i < omap->nports; i++) {
		if (!omap->phy[i] ||
		     pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY)
			continue;

		usb_phy_init(omap->phy[i]);
		/* bring PHY out of suspend */
		usb_phy_set_suspend(omap->phy[i], 0);
	}

	return 0;

err_pm_runtime:
	pm_runtime_put_sync(dev);

err_phy:
	/* shut down any PHYs that were successfully acquired */
	for (i = 0; i < omap->nports; i++) {
		if (omap->phy[i])
			usb_phy_shutdown(omap->phy[i]);
	}

	usb_put_hcd(hcd);

	return ret;
}
static int xapm_probe(struct platform_device *pdev) { struct xapm_dev *xapm; struct resource *res; int irq; int ret; void *ptr; xapm = devm_kzalloc(&pdev->dev, (sizeof(struct xapm_dev)), GFP_KERNEL); if (!xapm) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); xapm->regs = devm_ioremap_resource(&pdev->dev, res); if (!xapm->regs) { dev_err(&pdev->dev, "unable to iomap registers\n"); return -ENOMEM; } /* Initialize mode as Advanced so that if no mode in dts, default * is Advanced */ xapm->param.mode = XAPM_MODE_ADVANCED; ret = xapm_getprop(pdev, &xapm->param); if (ret < 0) return ret; xapm->info.mem[0].name = "xilinx_apm"; xapm->info.mem[0].addr = res->start; xapm->info.mem[0].size = resource_size(res); xapm->info.mem[0].memtype = UIO_MEM_PHYS; xapm->info.mem[1].addr = (unsigned long)kzalloc(UIO_DUMMY_MEMSIZE, GFP_KERNEL); ptr = (unsigned long *)xapm->info.mem[1].addr; xapm->info.mem[1].size = UIO_DUMMY_MEMSIZE; xapm->info.mem[1].memtype = UIO_MEM_LOGICAL; xapm->info.name = "axi-pmon"; xapm->info.version = DRV_VERSION; irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "unable to get irq\n"); return irq; } xapm->info.irq = irq; xapm->info.handler = xapm_handler; xapm->info.priv = xapm; memcpy(ptr, &xapm->param, sizeof(struct xapm_param)); ret = uio_register_device(&pdev->dev, &xapm->info); if (ret < 0) { dev_err(&pdev->dev, "unable to register to UIO\n"); return ret; } platform_set_drvdata(pdev, xapm); dev_info(&pdev->dev, "Probed Xilinx APM\n"); return 0; }
/*
 * vdec_probe - bring up the VDEC video decoder: map registers, acquire
 * clock and IRQ, register the misc device, initialize the dec/pp wait
 * queues and semaphores, and probe the hardware ID.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fixes:
 *  - platform_get_irq() reports failure with a negative errno; the old
 *    "if (!p->irq)" check let negative values through to request_irq.
 *  - clock lookup failure now propagates PTR_ERR() (e.g. -EPROBE_DEFER)
 *    instead of a hard-coded -ENXIO.
 */
static int __init vdec_probe(struct platform_device *pdev)
{
	struct vdec_device *p;
	struct resource *res;
	int ret;
	u32 hwid;

	/* Allocate private data */
	p = devm_kzalloc(&pdev->dev, sizeof(struct vdec_device), GFP_KERNEL);
	if (!p) {
		dev_dbg(&pdev->dev, "out of memory\n");
		return -ENOMEM;
	}

	p->dev = &pdev->dev;
	platform_set_drvdata(pdev, p);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	p->mmio_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(p->mmio_base))
		return PTR_ERR(p->mmio_base);

	p->clk = devm_clk_get(&pdev->dev, "vdec_clk");
	if (IS_ERR(p->clk)) {
		dev_err(&pdev->dev, "no vdec_clk clock defined\n");
		return PTR_ERR(p->clk);
	}

	p->irq = platform_get_irq(pdev, 0);
	if (p->irq < 0) {
		dev_err(&pdev->dev, "could not get irq\n");
		return p->irq;
	}

	ret = devm_request_irq(&pdev->dev, p->irq, vdec_isr, 0, pdev->name, p);
	if (ret) {
		dev_err(&pdev->dev, "unable to request VDEC irq\n");
		return ret;
	}

	/*
	 * Register the miscdevice.
	 * NOTE(review): the device node becomes visible before the wait
	 * queues/semaphores below are initialized — confirm no open() can
	 * race with the remaining setup.
	 */
	ret = misc_register(&vdec_misc_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register miscdevice\n");
		return ret;
	}

	p->num_cores = VDEC_MAX_CORES;
	p->iosize = resource_size(res);
	p->iobaseaddr = res->start;

	vdec6731_global = p;

	p->dec_irq_done = false;
	p->pp_irq_done = false;
	p->dec_owner = NULL;
	p->pp_owner = NULL;

	init_waitqueue_head(&p->dec_wq);
	init_waitqueue_head(&p->pp_wq);
	sema_init(&p->dec_sem, VDEC_MAX_CORES);
	sema_init(&p->pp_sem, 1);

	ret = clk_prepare_enable(p->clk);
	if (ret) {
		dev_err(&pdev->dev, "unable to prepare and enable clock\n");
		misc_deregister(&vdec_misc_device);
		return ret;
	}

	dev_info(&pdev->dev,
		 "VDEC controller at 0x%p, irq = %d, misc_minor = %d\n",
		 p->mmio_base, p->irq, vdec_misc_device.minor);

	/* Reset Asic (just in case..) */
	vdec_writel(p, VDEC_DIR, VDEC_DIR_ID | VDEC_DIR_ABORT);
	vdec_writel(p, VDEC_PPIR, VDEC_PPIR_ID);

	hwid = vdec_readl(p, VDEC_IDR);
	clk_disable_unprepare(p->clk);

	dev_warn(&pdev->dev, "Product ID: %#x (revision %d.%d.%d)\n",
		 (hwid & VDEC_IDR_PROD_ID) >> 16,
		 (hwid & VDEC_IDR_MAJOR_VER) >> 12,
		 (hwid & VDEC_IDR_MINOR_VER) >> 4,
		 (hwid & VDEC_IDR_BUILD_VER));

	return 0;
}
static int bfin_rotary_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct bfin_rotary_platform_data *pdata = dev_get_platdata(dev); struct bfin_rot *rotary; struct resource *res; struct input_dev *input; int error; /* Basic validation */ if ((pdata->rotary_up_key && !pdata->rotary_down_key) || (!pdata->rotary_up_key && pdata->rotary_down_key)) { return -EINVAL; } if (pdata->pin_list) { error = peripheral_request_list(pdata->pin_list, dev_name(dev)); if (error) { dev_err(dev, "requesting peripherals failed: %d\n", error); return error; } error = devm_add_action_or_reset(dev, bfin_rotary_free_action, pdata->pin_list); if (error) { dev_err(dev, "setting cleanup action failed: %d\n", error); return error; } } rotary = devm_kzalloc(dev, sizeof(struct bfin_rot), GFP_KERNEL); if (!rotary) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); rotary->base = devm_ioremap_resource(dev, res); if (IS_ERR(rotary->base)) return PTR_ERR(rotary->base); input = devm_input_allocate_device(dev); if (!input) return -ENOMEM; rotary->input = input; rotary->up_key = pdata->rotary_up_key; rotary->down_key = pdata->rotary_down_key; rotary->button_key = pdata->rotary_button_key; rotary->rel_code = pdata->rotary_rel_code; rotary->mode = pdata->mode; rotary->debounce = pdata->debounce; input->name = pdev->name; input->phys = "bfin-rotary/input0"; input->dev.parent = dev; input_set_drvdata(input, rotary); input->id.bustype = BUS_HOST; input->id.vendor = 0x0001; input->id.product = 0x0001; input->id.version = 0x0100; input->open = bfin_rotary_open; input->close = bfin_rotary_close; if (rotary->up_key) { __set_bit(EV_KEY, input->evbit); __set_bit(rotary->up_key, input->keybit); __set_bit(rotary->down_key, input->keybit); } else { __set_bit(EV_REL, input->evbit); __set_bit(rotary->rel_code, input->relbit); } if (rotary->button_key) { __set_bit(EV_KEY, input->evbit); __set_bit(rotary->button_key, input->keybit); } /* Quiesce the device before 
requesting irq */ bfin_rotary_close(input); rotary->irq = platform_get_irq(pdev, 0); if (rotary->irq < 0) { dev_err(dev, "No rotary IRQ specified\n"); return -ENOENT; } error = devm_request_irq(dev, rotary->irq, bfin_rotary_isr, 0, dev_name(dev), rotary); if (error) { dev_err(dev, "unable to claim irq %d; error %d\n", rotary->irq, error); return error; } error = input_register_device(input); if (error) { dev_err(dev, "unable to register input device (%d)\n", error); return error; } platform_set_drvdata(pdev, rotary); device_init_wakeup(dev, 1); return 0; }
/*
 * dwc3_probe - bring up the Synopsys DesignWare USB3 core.
 *
 * Carves the MEM resource in two (the xHCI window is re-requested later
 * by xhci-plat, so only the globals region is mapped here), gathers
 * configuration from device properties with legacy platform data as an
 * override, then initializes PHYs, event buffers and the selected
 * dr_mode under runtime PM.
 *
 * Returns 0 on success or a negative errno.
 */
static int dwc3_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dwc3_platform_data *pdata = dev_get_platdata(dev);
	struct resource *res;
	struct dwc3 *dwc;
	u8 lpm_nyet_threshold;
	u8 tx_de_emphasis;
	u8 hird_threshold;
	u32 fladj = 0;
	int ret;
	void __iomem *regs;
	void *mem;

	/* over-allocate so the dwc3 struct can be aligned to DWC3_ALIGN_MASK+1 */
	mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1);
	dwc->mem = mem;	/* keep the original pointer; 'dwc' is the aligned view */
	dwc->dev = dev;

	/* IRQ is handed to the xHCI child as its resource [1] */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "missing IRQ\n");
		return -ENODEV;
	}
	dwc->xhci_resources[1].start = res->start;
	dwc->xhci_resources[1].end = res->end;
	dwc->xhci_resources[1].flags = res->flags;
	dwc->xhci_resources[1].name = res->name;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing memory resource\n");
		return -ENODEV;
	}

	/* the first DWC3_XHCI_REGS_END bytes belong to the xHCI child */
	dwc->xhci_resources[0].start = res->start;
	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
					DWC3_XHCI_REGS_END;
	dwc->xhci_resources[0].flags = res->flags;
	dwc->xhci_resources[0].name = res->name;

	/* shift the resource past the xHCI window; undone in err0 below */
	res->start += DWC3_GLOBALS_REGS_START;

	/*
	 * Request memory region but exclude xHCI regs,
	 * since it will be requested by the xhci-plat driver.
	 */
	regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(regs)) {
		ret = PTR_ERR(regs);
		goto err0;
	}

	dwc->regs = regs;
	dwc->regs_size = resource_size(res);

	/* default to highest possible threshold */
	lpm_nyet_threshold = 0xff;

	/* default to -3.5dB de-emphasis */
	tx_de_emphasis = 1;

	/*
	 * default to assert utmi_sleep_n and use maximum allowed HIRD
	 * threshold value of 0b1100
	 */
	hird_threshold = 12;

	/* configuration via firmware/device properties... */
	dwc->maximum_speed = usb_get_maximum_speed(dev);
	dwc->dr_mode = usb_get_dr_mode(dev);

	dwc->has_lpm_erratum = device_property_read_bool(dev,
				"snps,has-lpm-erratum");
	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
				&lpm_nyet_threshold);
	dwc->is_utmi_l1_suspend = device_property_read_bool(dev,
				"snps,is-utmi-l1-suspend");
	device_property_read_u8(dev, "snps,hird-threshold",
				&hird_threshold);
	dwc->usb3_lpm_capable = device_property_read_bool(dev,
				"snps,usb3_lpm_capable");
	dwc->needs_fifo_resize = device_property_read_bool(dev,
				"tx-fifo-resize");

	dwc->disable_scramble_quirk = device_property_read_bool(dev,
				"snps,disable_scramble_quirk");
	dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
				"snps,u2exit_lfps_quirk");
	dwc->u2ss_inp3_quirk = device_property_read_bool(dev,
				"snps,u2ss_inp3_quirk");
	dwc->req_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,req_p1p2p3_quirk");
	dwc->del_p1p2p3_quirk = device_property_read_bool(dev,
				"snps,del_p1p2p3_quirk");
	dwc->del_phy_power_chg_quirk = device_property_read_bool(dev,
				"snps,del_phy_power_chg_quirk");
	dwc->lfps_filter_quirk = device_property_read_bool(dev,
				"snps,lfps_filter_quirk");
	dwc->rx_detect_poll_quirk = device_property_read_bool(dev,
				"snps,rx_detect_poll_quirk");
	dwc->dis_u3_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u3_susphy_quirk");
	dwc->dis_u2_susphy_quirk = device_property_read_bool(dev,
				"snps,dis_u2_susphy_quirk");
	dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
				"snps,dis_enblslpm_quirk");

	dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
				"snps,tx_de_emphasis_quirk");
	device_property_read_u8(dev, "snps,tx_de_emphasis",
				&tx_de_emphasis);
	device_property_read_string(dev, "snps,hsphy_interface",
				    &dwc->hsphy_interface);
	device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
				 &fladj);

	/* ...then legacy platform data overrides everything above */
	if (pdata) {
		dwc->maximum_speed = pdata->maximum_speed;
		dwc->has_lpm_erratum = pdata->has_lpm_erratum;
		if (pdata->lpm_nyet_threshold)
			lpm_nyet_threshold = pdata->lpm_nyet_threshold;
		dwc->is_utmi_l1_suspend = pdata->is_utmi_l1_suspend;
		if (pdata->hird_threshold)
			hird_threshold = pdata->hird_threshold;

		dwc->needs_fifo_resize = pdata->tx_fifo_resize;
		dwc->usb3_lpm_capable = pdata->usb3_lpm_capable;
		dwc->dr_mode = pdata->dr_mode;

		dwc->disable_scramble_quirk = pdata->disable_scramble_quirk;
		dwc->u2exit_lfps_quirk = pdata->u2exit_lfps_quirk;
		dwc->u2ss_inp3_quirk = pdata->u2ss_inp3_quirk;
		dwc->req_p1p2p3_quirk = pdata->req_p1p2p3_quirk;
		dwc->del_p1p2p3_quirk = pdata->del_p1p2p3_quirk;
		dwc->del_phy_power_chg_quirk = pdata->del_phy_power_chg_quirk;
		dwc->lfps_filter_quirk = pdata->lfps_filter_quirk;
		dwc->rx_detect_poll_quirk = pdata->rx_detect_poll_quirk;
		dwc->dis_u3_susphy_quirk = pdata->dis_u3_susphy_quirk;
		dwc->dis_u2_susphy_quirk = pdata->dis_u2_susphy_quirk;
		dwc->dis_enblslpm_quirk = pdata->dis_enblslpm_quirk;

		dwc->tx_de_emphasis_quirk = pdata->tx_de_emphasis_quirk;
		if (pdata->tx_de_emphasis)
			tx_de_emphasis = pdata->tx_de_emphasis;

		dwc->hsphy_interface = pdata->hsphy_interface;
		fladj = pdata->fladj_value;
	}

	dwc->lpm_nyet_threshold = lpm_nyet_threshold;
	dwc->tx_de_emphasis = tx_de_emphasis;

	/* HIRD threshold lives in bits [3:0]; bit 4 selects utmi L1 suspend */
	dwc->hird_threshold = hird_threshold
		| (dwc->is_utmi_l1_suspend << 4);

	platform_set_drvdata(pdev, dwc);
	dwc3_cache_hwparams(dwc);

	ret = dwc3_phy_setup(dwc);
	if (ret)
		goto err0;

	ret = dwc3_core_get_phy(dwc);
	if (ret)
		goto err0;

	spin_lock_init(&dwc->lock);

	/* inherit DMA configuration from the parent (glue) device */
	if (!dev->dma_mask) {
		dev->dma_mask = dev->parent->dma_mask;
		dev->dma_parms = dev->parent->dma_parms;
		dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
	}

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);
	pm_runtime_forbid(dev);

	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
	if (ret) {
		dev_err(dwc->dev, "failed to allocate event buffers\n");
		ret = -ENOMEM;
		goto err1;
	}

	/* Kconfig may force a single role; otherwise fall back to OTG */
	if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
		dwc->dr_mode = USB_DR_MODE_HOST;
	else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
		dwc->dr_mode = USB_DR_MODE_PERIPHERAL;

	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
		dwc->dr_mode = USB_DR_MODE_OTG;

	ret = dwc3_core_init(dwc);
	if (ret) {
		dev_err(dev, "failed to initialize core\n");
		goto err1;
	}

	/* Check the maximum_speed parameter */
	switch (dwc->maximum_speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
	case USB_SPEED_HIGH:
	case USB_SPEED_SUPER:
	case USB_SPEED_SUPER_PLUS:
		break;
	default:
		dev_err(dev, "invalid maximum_speed parameter %d\n",
			dwc->maximum_speed);
		/* fall through */
	case USB_SPEED_UNKNOWN:
		/* default to superspeed */
		dwc->maximum_speed = USB_SPEED_SUPER;

		/*
		 * default to superspeed plus if we are capable.
		 */
		if (dwc3_is_usb31(dwc) &&
		    (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
		     DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
			dwc->maximum_speed = USB_SPEED_SUPER_PLUS;

		break;
	}

	/* Adjust Frame Length */
	dwc3_frame_length_adjustment(dwc, fladj);

	usb_phy_set_suspend(dwc->usb2_phy, 0);
	usb_phy_set_suspend(dwc->usb3_phy, 0);
	ret = phy_power_on(dwc->usb2_generic_phy);
	if (ret < 0)
		goto err2;

	ret = phy_power_on(dwc->usb3_generic_phy);
	if (ret < 0)
		goto err3;

	ret = dwc3_event_buffers_setup(dwc);
	if (ret) {
		dev_err(dwc->dev, "failed to setup event buffers\n");
		goto err4;
	}

	ret = dwc3_core_init_mode(dwc);
	if (ret)
		goto err5;

	ret = dwc3_debugfs_init(dwc);
	if (ret) {
		dev_err(dev, "failed to initialize debugfs\n");
		goto err6;
	}

	pm_runtime_allow(dev);

	return 0;

	/* unwind in strict reverse order of the setup above */
err6:
	dwc3_core_exit_mode(dwc);

err5:
	dwc3_event_buffers_cleanup(dwc);

err4:
	phy_power_off(dwc->usb3_generic_phy);

err3:
	phy_power_off(dwc->usb2_generic_phy);

err2:
	usb_phy_set_suspend(dwc->usb2_phy, 1);
	usb_phy_set_suspend(dwc->usb3_phy, 1);
	dwc3_core_exit(dwc);

err1:
	dwc3_free_event_buffers(dwc);
	dwc3_ulpi_exit(dwc);

err0:
	/*
	 * restore res->start back to its original value so that, in case the
	 * probe is deferred, we don't end up getting error in request the
	 * memory region the next time probe is called.
	 */
	res->start -= DWC3_GLOBALS_REGS_START;

	return ret;
}
/*
 * exynos_pcie_probe - set up the Exynos PCIe root complex.
 *
 * Acquires the core and bus clocks, maps the ELBI, PHY and block
 * register windows (MEM resources 0..2), then registers the PCIe port.
 * Clocks are released in reverse order on any failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init exynos_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct exynos_pcie *ep;
	struct pcie_port *pp;
	struct resource *res;
	int ret;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	pp = &ep->pp;
	pp->dev = dev;

	ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);

	/* core clock */
	ep->clk = devm_clk_get(dev, "pcie");
	if (IS_ERR(ep->clk)) {
		dev_err(dev, "Failed to get pcie rc clock\n");
		return PTR_ERR(ep->clk);
	}

	ret = clk_prepare_enable(ep->clk);
	if (ret)
		return ret;

	/* bus clock */
	ep->bus_clk = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(ep->bus_clk)) {
		dev_err(dev, "Failed to get pcie bus clock\n");
		ret = PTR_ERR(ep->bus_clk);
		goto fail_clk;
	}

	ret = clk_prepare_enable(ep->bus_clk);
	if (ret)
		goto fail_clk;

	/* register windows: 0 = ELBI, 1 = PHY, 2 = block control */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ep->elbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ep->elbi_base)) {
		ret = PTR_ERR(ep->elbi_base);
		goto fail_bus_clk;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	ep->phy_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ep->phy_base)) {
		ret = PTR_ERR(ep->phy_base);
		goto fail_bus_clk;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	ep->block_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ep->block_base)) {
		ret = PTR_ERR(ep->block_base);
		goto fail_bus_clk;
	}

	ret = exynos_add_pcie_port(ep, pdev);
	if (ret < 0)
		goto fail_bus_clk;

	platform_set_drvdata(pdev, ep);

	return 0;

fail_bus_clk:
	clk_disable_unprepare(ep->bus_clk);
fail_clk:
	clk_disable_unprepare(ep->clk);
	return ret;
}
/*
 * img_spfi_probe - set up the IMG SPFI controller: map registers, request
 * the IRQ, enable the sys/spfi clocks, configure the SPI master and try
 * to acquire DMA channels (falling back to PIO when unavailable).
 *
 * Returns 0 on success or a negative errno.
 */
static int img_spfi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct img_spfi *spfi;
	struct resource *res;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
	if (!master)
		return -ENOMEM;
	platform_set_drvdata(pdev, master);

	spfi = spi_master_get_devdata(master);
	spfi->dev = &pdev->dev;
	spfi->master = master;
	spin_lock_init(&spfi->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spfi->regs = devm_ioremap_resource(spfi->dev, res);
	if (IS_ERR(spfi->regs)) {
		ret = PTR_ERR(spfi->regs);
		goto put_spi;
	}
	/* physical base is kept for DMA slave configuration */
	spfi->phys = res->start;

	spfi->irq = platform_get_irq(pdev, 0);
	if (spfi->irq < 0) {
		ret = spfi->irq;
		goto put_spi;
	}
	/*
	 * NOTE(review): IRQ_TYPE_LEVEL_HIGH is an irq *type* constant being
	 * passed as request flags; IRQF_TRIGGER_HIGH is the matching flag.
	 * The two happen to share the value 4 — confirm before changing.
	 */
	ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
			       IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
	if (ret)
		goto put_spi;

	spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
	if (IS_ERR(spfi->sys_clk)) {
		ret = PTR_ERR(spfi->sys_clk);
		goto put_spi;
	}
	spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
	if (IS_ERR(spfi->spfi_clk)) {
		ret = PTR_ERR(spfi->spfi_clk);
		goto put_spi;
	}

	ret = clk_prepare_enable(spfi->sys_clk);
	if (ret)
		goto put_spi;
	ret = clk_prepare_enable(spfi->spfi_clk);
	if (ret)
		goto disable_pclk;

	spfi_reset(spfi);
	/*
	 * Only enable the error (IACCESS) interrupt. In PIO mode we'll
	 * poll the status of the FIFOs.
	 */
	spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
	if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
		master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
	/* speed limits are derived from the interface clock rate */
	master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
	master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;

	master->setup = img_spfi_setup;
	master->cleanup = img_spfi_cleanup;
	master->transfer_one = img_spfi_transfer_one;
	master->prepare_message = img_spfi_prepare;
	master->unprepare_message = img_spfi_unprepare;
	master->handle_err = img_spfi_handle_err;

	/* DMA is optional: need both channels, otherwise fall back to PIO */
	spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
	spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
	if (!spfi->tx_ch || !spfi->rx_ch) {
		if (spfi->tx_ch)
			dma_release_channel(spfi->tx_ch);
		if (spfi->rx_ch)
			dma_release_channel(spfi->rx_ch);
		dev_warn(spfi->dev,
			 "Failed to get DMA channels, falling back to PIO mode\n");
	} else {
		master->dma_tx = spfi->tx_ch;
		master->dma_rx = spfi->rx_ch;
		master->can_dma = img_spfi_can_dma;
	}

	pm_runtime_set_active(spfi->dev);
	pm_runtime_enable(spfi->dev);

	ret = devm_spi_register_master(spfi->dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(spfi->dev);
	if (spfi->rx_ch)
		dma_release_channel(spfi->rx_ch);
	if (spfi->tx_ch)
		dma_release_channel(spfi->tx_ch);
	clk_disable_unprepare(spfi->spfi_clk);
disable_pclk:
	clk_disable_unprepare(spfi->sys_clk);
put_spi:
	spi_master_put(master);

	return ret;
}
static int fimc_is_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct fimc_is *is; struct resource res; struct device_node *node; int ret; is = devm_kzalloc(&pdev->dev, sizeof(*is), GFP_KERNEL); if (!is) return -ENOMEM; is->pdev = pdev; is->isp.pdev = pdev; init_waitqueue_head(&is->irq_queue); spin_lock_init(&is->slock); mutex_init(&is->lock); ret = of_address_to_resource(dev->of_node, 0, &res); if (ret < 0) return ret; is->regs = devm_ioremap_resource(dev, &res); if (IS_ERR(is->regs)) return PTR_ERR(is->regs); node = of_get_child_by_name(dev->of_node, "pmu"); if (!node) return -ENODEV; is->pmu_regs = of_iomap(node, 0); if (!is->pmu_regs) return -ENOMEM; is->irq = irq_of_parse_and_map(dev->of_node, 0); if (is->irq < 0) { dev_err(dev, "no irq found\n"); return is->irq; } ret = fimc_is_get_clocks(is); if (ret < 0) return ret; platform_set_drvdata(pdev, is); ret = request_irq(is->irq, fimc_is_irq_handler, 0, dev_name(dev), is); if (ret < 0) { dev_err(dev, "irq request failed\n"); goto err_clk; } pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) goto err_irq; is->alloc_ctx = vb2_dma_contig_init_ctx(dev); if (IS_ERR(is->alloc_ctx)) { ret = PTR_ERR(is->alloc_ctx); goto err_irq; } /* * Register FIMC-IS V4L2 subdevs to this driver. The video nodes * will be created within the subdev's registered() callback. */ ret = fimc_is_register_subdevs(is); if (ret < 0) goto err_vb; ret = fimc_is_debugfs_create(is); if (ret < 0) goto err_sd; ret = fimc_is_request_firmware(is, FIMC_IS_FW_FILENAME); if (ret < 0) goto err_dfs; pm_runtime_put_sync(dev); dev_dbg(dev, "FIMC-IS registered successfully\n"); return 0; err_dfs: fimc_is_debugfs_remove(is); err_vb: vb2_dma_contig_cleanup_ctx(is->alloc_ctx); err_sd: fimc_is_unregister_subdevs(is); err_irq: free_irq(is->irq, is); err_clk: fimc_is_put_clocks(is); return ret; }
static int oxnas_gpio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct of_phandle_args pinspec; struct oxnas_gpio_bank *bank; unsigned int id, ngpios; int irq, ret; struct resource *res; if (of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &pinspec)) { dev_err(&pdev->dev, "gpio-ranges property not found\n"); return -EINVAL; } id = pinspec.args[1] / PINS_PER_BANK; ngpios = pinspec.args[2]; if (id >= ARRAY_SIZE(oxnas_gpio_banks)) { dev_err(&pdev->dev, "invalid gpio-ranges base arg\n"); return -EINVAL; } if (ngpios > PINS_PER_BANK) { dev_err(&pdev->dev, "invalid gpio-ranges count arg\n"); return -EINVAL; } bank = &oxnas_gpio_banks[id]; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); bank->reg_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(bank->reg_base)) return PTR_ERR(bank->reg_base); irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "irq get failed\n"); return irq; } bank->id = id; bank->gpio_chip.parent = &pdev->dev; bank->gpio_chip.of_node = np; bank->gpio_chip.ngpio = ngpios; ret = gpiochip_add_data(&bank->gpio_chip, bank); if (ret < 0) { dev_err(&pdev->dev, "Failed to add GPIO chip %u: %d\n", id, ret); return ret; } ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip, 0, handle_level_irq, IRQ_TYPE_NONE); if (ret < 0) { dev_err(&pdev->dev, "Failed to add IRQ chip %u: %d\n", id, ret); gpiochip_remove(&bank->gpio_chip); return ret; } gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip, irq, oxnas_gpio_irq_handler); return 0; }
static int samsung_usb2phy_probe(struct platform_device *pdev) { struct samsung_usbphy *sphy; struct usb_otg *otg; struct samsung_usbphy_data *pdata = dev_get_platdata(&pdev->dev); const struct samsung_usbphy_drvdata *drv_data; struct device *dev = &pdev->dev; struct resource *phy_mem; void __iomem *phy_base; struct clk *clk; int ret; phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); phy_base = devm_ioremap_resource(dev, phy_mem); if (IS_ERR(phy_base)) return PTR_ERR(phy_base); sphy = devm_kzalloc(dev, sizeof(*sphy), GFP_KERNEL); if (!sphy) return -ENOMEM; otg = devm_kzalloc(dev, sizeof(*otg), GFP_KERNEL); if (!otg) return -ENOMEM; drv_data = samsung_usbphy_get_driver_data(pdev); if (drv_data->cpu_type == TYPE_EXYNOS5250 || drv_data->cpu_type == TYPE_EXYNOS5420) clk = devm_clk_get(dev, "usbhost"); else clk = devm_clk_get(dev, "otg"); if (IS_ERR(clk)) { dev_err(dev, "Failed to get usbhost/otg clock\n"); return PTR_ERR(clk); } sphy->dev = dev; if (dev->of_node) { ret = samsung_usbphy_parse_dt(sphy); if (ret < 0) return ret; } else { if (!pdata) { dev_err(dev, "no platform data specified\n"); return -EINVAL; } } sphy->plat = pdata; sphy->regs = phy_base; sphy->clk = clk; sphy->drv_data = drv_data; sphy->phy.dev = sphy->dev; sphy->phy.label = "samsung-usb2phy"; sphy->phy.type = USB_PHY_TYPE_USB2; sphy->phy.init = samsung_usb2phy_init; sphy->phy.shutdown = samsung_usb2phy_shutdown; sphy->ref_clk_freq = samsung_usbphy_get_refclk_freq(sphy); if (sphy->ref_clk_freq < 0) return -EINVAL; sphy->phy.otg = otg; sphy->phy.otg->phy = &sphy->phy; sphy->phy.otg->set_host = samsung_usbphy_set_host; spin_lock_init(&sphy->lock); platform_set_drvdata(pdev, sphy); return usb_add_phy_dev(&sphy->phy); }
static int rockchip_thermal_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct rockchip_thermal_data *thermal; const struct of_device_id *match; struct resource *res; int irq; int i; int error; match = of_match_node(of_rockchip_thermal_match, np); if (!match) return -ENXIO; irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no irq resource?\n"); return -EINVAL; } thermal = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_thermal_data), GFP_KERNEL); if (!thermal) return -ENOMEM; thermal->pdev = pdev; thermal->chip = (const struct rockchip_tsadc_chip *)match->data; if (!thermal->chip) return -EINVAL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); thermal->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(thermal->regs)) return PTR_ERR(thermal->regs); thermal->reset = devm_reset_control_get(&pdev->dev, "tsadc-apb"); if (IS_ERR(thermal->reset)) { error = PTR_ERR(thermal->reset); dev_err(&pdev->dev, "failed to get tsadc reset: %d\n", error); return error; } thermal->clk = devm_clk_get(&pdev->dev, "tsadc"); if (IS_ERR(thermal->clk)) { error = PTR_ERR(thermal->clk); dev_err(&pdev->dev, "failed to get tsadc clock: %d\n", error); return error; } thermal->pclk = devm_clk_get(&pdev->dev, "apb_pclk"); if (IS_ERR(thermal->pclk)) { error = PTR_ERR(thermal->clk); dev_err(&pdev->dev, "failed to get apb_pclk clock: %d\n", error); return error; } error = clk_prepare_enable(thermal->clk); if (error) { dev_err(&pdev->dev, "failed to enable converter clock: %d\n", error); return error; } error = clk_prepare_enable(thermal->pclk); if (error) { dev_err(&pdev->dev, "failed to enable pclk: %d\n", error); goto err_disable_clk; } rockchip_thermal_reset_controller(thermal->reset); error = rockchip_configure_from_dt(&pdev->dev, np, thermal); if (error) { dev_err(&pdev->dev, "failed to parse device tree data: %d\n", error); goto err_disable_pclk; } thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); error = 
rockchip_thermal_register_sensor(pdev, thermal, &thermal->sensors[0], SENSOR_CPU); if (error) { dev_err(&pdev->dev, "failed to register CPU thermal sensor: %d\n", error); goto err_disable_pclk; } error = rockchip_thermal_register_sensor(pdev, thermal, &thermal->sensors[1], SENSOR_GPU); if (error) { dev_err(&pdev->dev, "failed to register GPU thermal sensor: %d\n", error); goto err_unregister_cpu_sensor; } error = devm_request_threaded_irq(&pdev->dev, irq, NULL, &rockchip_thermal_alarm_irq_thread, IRQF_ONESHOT, "rockchip_thermal", thermal); if (error) { dev_err(&pdev->dev, "failed to request tsadc irq: %d\n", error); goto err_unregister_gpu_sensor; } thermal->chip->control(thermal->regs, true); for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) rockchip_thermal_toggle_sensor(&thermal->sensors[i], true); platform_set_drvdata(pdev, thermal); return 0; err_unregister_gpu_sensor: thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[1].tzd); err_unregister_cpu_sensor: thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[0].tzd); err_disable_pclk: clk_disable_unprepare(thermal->pclk); err_disable_clk: clk_disable_unprepare(thermal->clk); return error; }
static int etm_probe(struct amba_device *adev, const struct amba_id *id) { int ret; void __iomem *base; struct device *dev = &adev->dev; struct coresight_platform_data *pdata = NULL; struct etm_drvdata *drvdata; struct resource *res = &adev->res; struct coresight_desc desc = { 0 }; drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; drvdata->use_cp14 = fwnode_property_read_bool(dev->fwnode, "arm,cp14"); dev_set_drvdata(dev, drvdata); /* Validity for the resource is already checked by the AMBA core */ base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) return PTR_ERR(base); drvdata->base = base; spin_lock_init(&drvdata->spinlock); drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */ if (!IS_ERR(drvdata->atclk)) { ret = clk_prepare_enable(drvdata->atclk); if (ret) return ret; } drvdata->cpu = coresight_get_cpu(dev); desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu); if (!desc.name) return -ENOMEM; cpus_read_lock(); etmdrvdata[drvdata->cpu] = drvdata; if (smp_call_function_single(drvdata->cpu, etm_init_arch_data, drvdata, 1)) dev_err(dev, "ETM arch init failed\n"); if (!etm_count++) { cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING, "arm/coresight:starting", etm_starting_cpu, etm_dying_cpu); ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, "arm/coresight:online", etm_online_cpu, NULL); if (ret < 0) goto err_arch_supported; hp_online = ret; } cpus_read_unlock(); if (etm_arch_supported(drvdata->arch) == false) { ret = -EINVAL; goto err_arch_supported; } etm_init_trace_id(drvdata); etm_set_default(&drvdata->config); pdata = coresight_get_platform_data(dev); if (IS_ERR(pdata)) { ret = PTR_ERR(pdata); goto err_arch_supported; } adev->dev.platform_data = pdata; desc.type = CORESIGHT_DEV_TYPE_SOURCE; desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; desc.ops = &etm_cs_ops; desc.pdata = pdata; desc.dev = dev; desc.groups = coresight_etm_groups; 
drvdata->csdev = coresight_register(&desc); if (IS_ERR(drvdata->csdev)) { ret = PTR_ERR(drvdata->csdev); goto err_arch_supported; } ret = etm_perf_symlink(drvdata->csdev, true); if (ret) { coresight_unregister(drvdata->csdev); goto err_arch_supported; } pm_runtime_put(&adev->dev); dev_info(&drvdata->csdev->dev, "%s initialized\n", (char *)coresight_get_uci_data(id)); if (boot_enable) { coresight_enable(drvdata->csdev); drvdata->boot_enable = true; } return 0; err_arch_supported: if (--etm_count == 0) { cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); if (hp_online) cpuhp_remove_state_nocalls(hp_online); } return ret; }