static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
{
    struct dma_slave_config slave_config;
    const __be32 *addr;
    dma_addr_t dma_reg_base;
    int ret;

    /* base address in dma-space */
    addr = of_get_address(master->dev.of_node, 0, NULL, NULL);
    if (!addr) {
        dev_err(dev, "could not get DMA-register address - not using dma mode\n");
        goto err;
    }
    dma_reg_base = be32_to_cpup(addr);

    /* get tx/rx dma */
    master->dma_tx = dma_request_slave_channel(dev, "tx");
    if (!master->dma_tx) {
        dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
        goto err;
    }
    master->dma_rx = dma_request_slave_channel(dev, "rx");
    if (!master->dma_rx) {
        dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
        goto err_release;
    }

    /* configure DMAs */
    slave_config.direction = DMA_MEM_TO_DEV;
    slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
    slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

    ret = dmaengine_slave_config(master->dma_tx, &slave_config);
    if (ret)
        goto err_config;

    slave_config.direction = DMA_DEV_TO_MEM;
    slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
    slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

    ret = dmaengine_slave_config(master->dma_rx, &slave_config);
    if (ret)
        goto err_config;

    /* all went well, so set can_dma */
    master->can_dma = bcm2835_spi_can_dma;
    /* limitation by BCM2835_SPI_DLEN */
    master->max_dma_len = 65535;
    /* need to do TX AND RX DMA, so we need dummy buffers */
    master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;

    return;

err_config:
    dev_err(dev, "issue configuring dma: %d - not using DMA mode\n", ret);
err_release:
    bcm2835_dma_release(master);
err:
    return;
}
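/*
 * The error path above calls bcm2835_dma_release(), which is not shown here.
 * Below is a minimal hedged sketch of what such a helper needs to do,
 * assuming only the two channels requested above; the real driver may also
 * do more teardown.
 */
static void bcm2835_dma_release(struct spi_master *master)
{
    if (master->dma_tx) {
        dmaengine_terminate_all(master->dma_tx);
        dma_release_channel(master->dma_tx);
        master->dma_tx = NULL;
    }
    if (master->dma_rx) {
        dmaengine_terminate_all(master->dma_rx);
        dma_release_channel(master->dma_rx);
        master->dma_rx = NULL;
    }
}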
static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
                             struct spi_master *master,
                             const struct resource *res)
{
    int ret;

    /* Prepare for TX DMA: */
    master->dma_tx = dma_request_slave_channel(dev, "tx");
    if (!master->dma_tx) {
        dev_err(dev, "cannot get the TX DMA channel!\n");
        ret = -EINVAL;
        goto err;
    }

    spi_imx->tx_config.direction = DMA_MEM_TO_DEV;
    spi_imx->tx_config.dst_addr = res->start + MXC_CSPITXDATA;
    spi_imx->tx_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
    spi_imx->tx_config.dst_maxburst = spi_imx_get_fifosize(spi_imx) / 4;
    ret = dmaengine_slave_config(master->dma_tx, &spi_imx->tx_config);
    if (ret) {
        dev_err(dev, "error in TX dma configuration.\n");
        goto err;
    }

    /* Prepare for RX: */
    master->dma_rx = dma_request_slave_channel(dev, "rx");
    if (!master->dma_rx) {
        dev_dbg(dev, "cannot get the DMA channel.\n");
        ret = -EINVAL;
        goto err;
    }

    spi_imx->rx_config.direction = DMA_DEV_TO_MEM;
    spi_imx->rx_config.src_addr = res->start + MXC_CSPIRXDATA;
    spi_imx->rx_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
    spi_imx->rx_config.src_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
    ret = dmaengine_slave_config(master->dma_rx, &spi_imx->rx_config);
    if (ret) {
        dev_err(dev, "error in RX dma configuration.\n");
        goto err;
    }

    init_completion(&spi_imx->dma_rx_completion);
    init_completion(&spi_imx->dma_tx_completion);
    master->can_dma = spi_imx_can_dma;
    master->max_dma_len = MAX_SDMA_BD_BYTES;
    spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
                                     SPI_MASTER_MUST_TX;
    spi_imx->rx_wml = spi_imx->rx_config.src_maxburst;
    spi_imx->tx_wml = spi_imx->tx_config.dst_maxburst;
    spi_imx->dma_is_inited = 1;

    return 0;
err:
    spi_imx_sdma_exit(spi_imx);
    return ret;
}
static void mmci_dma_setup(struct mmci_host *host)
{
    const char *rxname, *txname;

    host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
    host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");

    /* initialize pre request cookie */
    host->next_data.cookie = 1;

    /*
     * If only an RX channel is specified, the driver will
     * attempt to use it bidirectionally, however if it
     * is specified but cannot be located, DMA will be disabled.
     */
    if (host->dma_rx_channel && !host->dma_tx_channel)
        host->dma_tx_channel = host->dma_rx_channel;

    if (host->dma_rx_channel)
        rxname = dma_chan_name(host->dma_rx_channel);
    else
        rxname = "none";

    if (host->dma_tx_channel)
        txname = dma_chan_name(host->dma_tx_channel);
    else
        txname = "none";

    dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
             rxname, txname);

    /*
     * Limit the maximum segment size in any SG entry according to
     * the parameters of the DMA engine device.
     */
    if (host->dma_tx_channel) {
        struct device *dev = host->dma_tx_channel->device->dev;
        unsigned int max_seg_size = dma_get_max_seg_size(dev);

        if (max_seg_size < host->mmc->max_seg_size)
            host->mmc->max_seg_size = max_seg_size;
    }
    if (host->dma_rx_channel) {
        struct device *dev = host->dma_rx_channel->device->dev;
        unsigned int max_seg_size = dma_get_max_seg_size(dev);

        if (max_seg_size < host->mmc->max_seg_size)
            host->mmc->max_seg_size = max_seg_size;
    }

    if (host->ops && host->ops->dma_setup)
        host->ops->dma_setup(host);
}
static int bcm2835_smi_dma_setup(struct bcm2835_smi_instance *inst)
{
    int i, rv = 0;

    inst->dma_chan = dma_request_slave_channel(inst->dev, "rx-tx");
    /*
     * Guard added here: dma_request_slave_channel() returns NULL on
     * failure, and the original code would have passed that NULL
     * straight into dmaengine_slave_config().
     */
    if (!inst->dma_chan) {
        dev_err(inst->dev, "Could not get DMA channel!");
        return -ENODEV;
    }

    inst->dma_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    inst->dma_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    inst->dma_config.src_addr = inst->smi_regs_busaddr + SMID;
    inst->dma_config.dst_addr = inst->dma_config.src_addr;
    /* Direction unimportant - always overridden by prep_slave_sg */
    inst->dma_config.direction = DMA_DEV_TO_MEM;
    dmaengine_slave_config(inst->dma_chan, &inst->dma_config);

    /* Alloc and map bounce buffers */
    for (i = 0; i < DMA_BOUNCE_BUFFER_COUNT; ++i) {
        inst->bounce.buffer[i] =
            dmam_alloc_coherent(inst->dev, DMA_BOUNCE_BUFFER_SIZE,
                                &inst->bounce.phys[i], GFP_KERNEL);
        if (!inst->bounce.buffer[i]) {
            dev_err(inst->dev, "Could not allocate buffer!");
            rv = -ENOMEM;
            break;
        }
        smi_scatterlist_from_buffer(inst, inst->bounce.phys[i],
                                    DMA_BOUNCE_BUFFER_SIZE,
                                    &inst->bounce.sgl[i]);
    }

    return rv;
}
static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
{
    struct dma_slave_config cfg = {
        .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
    };
    int ret;

    host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
    dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
            host->chan_tx);

    if (!host->chan_tx)
        return;

    cfg.direction = DMA_MEM_TO_DEV;
    cfg.dst_addr = start + USDHI6_SD_BUF0;
    cfg.dst_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */
    cfg.src_addr = 0;
    ret = dmaengine_slave_config(host->chan_tx, &cfg);
    if (ret < 0)
        goto e_release_tx;

    host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
    dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
            host->chan_rx);

    if (!host->chan_rx)
        goto e_release_tx;

    cfg.direction = DMA_DEV_TO_MEM;
    cfg.src_addr = cfg.dst_addr;
    cfg.src_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */
    cfg.dst_addr = 0;
    ret = dmaengine_slave_config(host->chan_rx, &cfg);
    if (ret < 0)
        goto e_release_rx;

    return;

e_release_rx:
    dma_release_channel(host->chan_rx);
    host->chan_rx = NULL;
e_release_tx:
    dma_release_channel(host->chan_tx);
    host->chan_tx = NULL;
}
static struct dma_chan *imx_asrc_get_dma_channel(enum asrc_pair_index index,
                                                 bool in)
{
    char name[4];

    /* builds "rxa".."txc" style names, e.g. "rxa" for pair A input */
    sprintf(name, "%cx%c", in ? 'r' : 't', index + 'a');

    return dma_request_slave_channel(asrc->dev, name);
}
/**
 * Get DMA channel according to the pair and direction.
 */
struct dma_chan *fsl_asrc_get_dma_channel(struct fsl_asrc_pair *pair, bool dir)
{
    struct fsl_asrc *asrc_priv = pair->asrc_priv;
    enum asrc_pair_index index = pair->index;
    char name[4];

    sprintf(name, "%cx%c", dir == IN ? 'r' : 't', index + 'a');

    return dma_request_slave_channel(&asrc_priv->pdev->dev, name);
}
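/*
 * Hedged usage sketch for the helper above. The pair/IN symbols follow the
 * driver's own conventions; the surrounding function is illustrative only,
 * not part of the driver.
 */
static int fsl_asrc_example_open(struct fsl_asrc_pair *pair)
{
    struct dma_chan *chan = fsl_asrc_get_dma_channel(pair, IN);

    /* dma_request_slave_channel() returns NULL on failure */
    if (!chan)
        return -EBUSY;

    /* ... configure and run transfers on chan ... */

    dma_release_channel(chan);
    return 0;
}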
static int stm32_of_dma_tx_probe(struct stm32_port *stm32port,
                                 struct platform_device *pdev)
{
    struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
    struct uart_port *port = &stm32port->port;
    struct device *dev = &pdev->dev;
    struct dma_slave_config config;
    int ret;

    stm32port->tx_dma_busy = false;

    /* Request DMA TX channel */
    stm32port->tx_ch = dma_request_slave_channel(dev, "tx");
    if (!stm32port->tx_ch) {
        dev_info(dev, "tx dma alloc failed\n");
        return -ENODEV;
    }
    stm32port->tx_buf = dma_alloc_coherent(&pdev->dev, TX_BUF_L,
                                           &stm32port->tx_dma_buf,
                                           GFP_KERNEL);
    if (!stm32port->tx_buf) {
        ret = -ENOMEM;
        goto alloc_err;
    }

    /* Configure DMA channel */
    memset(&config, 0, sizeof(config));
    config.dst_addr = port->mapbase + ofs->tdr;
    config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

    ret = dmaengine_slave_config(stm32port->tx_ch, &config);
    if (ret < 0) {
        dev_err(dev, "tx dma channel config failed\n");
        ret = -ENODEV;
        goto config_err;
    }

    return 0;

config_err:
    dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
                      stm32port->tx_dma_buf);
alloc_err:
    dma_release_channel(stm32port->tx_ch);
    stm32port->tx_ch = NULL;

    return ret;
}
static int omap_pcm_open(struct snd_pcm_substream *substream)
{
    struct snd_soc_pcm_runtime *rtd = substream->private_data;
    struct snd_dmaengine_dai_dma_data *dma_data;
    int ret;

    snd_soc_set_runtime_hwparams(substream, &omap_pcm_hardware);

    dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);

    /* DT boot: filter_data is the DMA name */
    if (rtd->cpu_dai->dev->of_node) {
        struct dma_chan *chan;

        chan = dma_request_slave_channel(rtd->cpu_dai->dev,
                                         dma_data->filter_data);
        ret = snd_dmaengine_pcm_open(substream, chan);
    } else {
        ret = snd_dmaengine_pcm_open_request_chan(substream,
                                                  omap_dma_filter_fn,
                                                  dma_data->filter_data);
    }

    return ret;
}
static void pxa_uart_dma_init(struct uart_pxa_port *up)
{
    struct uart_pxa_dma *pxa_dma = &up->uart_dma;
    dma_cap_mask_t mask;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);

    if (NULL == pxa_dma->rxdma_chan) {
        pxa_dma->rxdma_chan = dma_request_slave_channel(up->port.dev,
                                                        "rx");
        if (NULL == pxa_dma->rxdma_chan)
            goto out;
    }

    if (NULL == pxa_dma->txdma_chan) {
        pxa_dma->txdma_chan = dma_request_slave_channel(up->port.dev,
                                                        "tx");
        if (NULL == pxa_dma->txdma_chan)
            goto err_txdma;
    }

    if (NULL == pxa_dma->txdma_addr) {
        pxa_dma->txdma_addr = dma_alloc_coherent(up->port.dev, DMA_BLOCK,
                                                 &pxa_dma->txdma_addr_phys,
                                                 GFP_KERNEL);
        if (!pxa_dma->txdma_addr)
            goto txdma_err_alloc;
    }

    if (NULL == pxa_dma->rxdma_addr) {
        pxa_dma->rxdma_addr = dma_alloc_coherent(up->port.dev, DMA_BLOCK,
                                                 &pxa_dma->rxdma_addr_phys,
                                                 GFP_KERNEL);
        if (!pxa_dma->rxdma_addr)
            goto rxdma_err_alloc;
    }

#ifdef CONFIG_PM
    pxa_dma->tx_buf_save = kmalloc(DMA_BLOCK, GFP_KERNEL);
    if (!pxa_dma->tx_buf_save)
        goto buf_err_alloc;
#endif

    pxa_dma->dma_status = 0;
    return;

#ifdef CONFIG_PM
buf_err_alloc:
    dma_free_coherent(up->port.dev, DMA_BLOCK, pxa_dma->rxdma_addr,
                      pxa_dma->rxdma_addr_phys);
    pxa_dma->rxdma_addr = NULL;
#endif
rxdma_err_alloc:
    dma_free_coherent(up->port.dev, DMA_BLOCK, pxa_dma->txdma_addr,
                      pxa_dma->txdma_addr_phys);
    pxa_dma->txdma_addr = NULL;
txdma_err_alloc:
    dma_release_channel(pxa_dma->txdma_chan);
    pxa_dma->txdma_chan = NULL;
err_txdma:
    dma_release_channel(pxa_dma->rxdma_chan);
    pxa_dma->rxdma_chan = NULL;
out:
    return;
}
static int img_spfi_probe(struct platform_device *pdev)
{
    struct spi_master *master;
    struct img_spfi *spfi;
    struct resource *res;
    int ret;
    u32 max_speed_hz;

    master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
    if (!master)
        return -ENOMEM;
    platform_set_drvdata(pdev, master);

    spfi = spi_master_get_devdata(master);
    spfi->dev = &pdev->dev;
    spfi->master = master;
    spin_lock_init(&spfi->lock);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    spfi->regs = devm_ioremap_resource(spfi->dev, res);
    if (IS_ERR(spfi->regs)) {
        ret = PTR_ERR(spfi->regs);
        goto put_spi;
    }
    spfi->phys = res->start;

    spfi->irq = platform_get_irq(pdev, 0);
    if (spfi->irq < 0) {
        ret = spfi->irq;
        goto put_spi;
    }
    ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
                           IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
    if (ret)
        goto put_spi;

    spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
    if (IS_ERR(spfi->sys_clk)) {
        ret = PTR_ERR(spfi->sys_clk);
        goto put_spi;
    }
    spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
    if (IS_ERR(spfi->spfi_clk)) {
        ret = PTR_ERR(spfi->spfi_clk);
        goto put_spi;
    }

    ret = clk_prepare_enable(spfi->sys_clk);
    if (ret)
        goto put_spi;
    ret = clk_prepare_enable(spfi->spfi_clk);
    if (ret)
        goto disable_pclk;

    spfi_reset(spfi);

    /*
     * Only enable the error (IACCESS) interrupt. In PIO mode we'll
     * poll the status of the FIFOs.
     */
    spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);

    master->auto_runtime_pm = true;
    master->bus_num = pdev->id;
    master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
    if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
        master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
    master->dev.of_node = pdev->dev.of_node;
    master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
    master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
    master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;

    /*
     * Maximum speed supported by spfi is limited to the lower value
     * between 1/4 of the SPFI clock and the "spfi-max-frequency"
     * defined in the device tree.
     * If no value is defined in the device tree assume the maximum
     * speed supported to be 1/4 of the SPFI clock.
     */
    if (!of_property_read_u32(spfi->dev->of_node, "spfi-max-frequency",
                              &max_speed_hz)) {
        if (master->max_speed_hz > max_speed_hz)
            master->max_speed_hz = max_speed_hz;
    }

    master->setup = img_spfi_setup;
    master->cleanup = img_spfi_cleanup;
    master->transfer_one = img_spfi_transfer_one;
    master->prepare_message = img_spfi_prepare;
    master->unprepare_message = img_spfi_unprepare;
    master->handle_err = img_spfi_handle_err;

    spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
    spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
    if (!spfi->tx_ch || !spfi->rx_ch) {
        if (spfi->tx_ch)
            dma_release_channel(spfi->tx_ch);
        if (spfi->rx_ch)
            dma_release_channel(spfi->rx_ch);
        /*
         * Reset the pointers after releasing, otherwise the error
         * path below would release the same channel a second time.
         */
        spfi->tx_ch = NULL;
        spfi->rx_ch = NULL;
        dev_warn(spfi->dev,
                 "Failed to get DMA channels, falling back to PIO mode\n");
    } else {
        master->dma_tx = spfi->tx_ch;
        master->dma_rx = spfi->rx_ch;
        master->can_dma = img_spfi_can_dma;
    }

    pm_runtime_set_active(spfi->dev);
    pm_runtime_enable(spfi->dev);

    ret = devm_spi_register_master(spfi->dev, master);
    if (ret)
        goto disable_pm;

    return 0;

disable_pm:
    pm_runtime_disable(spfi->dev);
    if (spfi->rx_ch)
        dma_release_channel(spfi->rx_ch);
    if (spfi->tx_ch)
        dma_release_channel(spfi->tx_ch);
    clk_disable_unprepare(spfi->spfi_clk);
disable_pclk:
    clk_disable_unprepare(spfi->sys_clk);
put_spi:
    spi_master_put(master);

    return ret;
}
static int stm32_of_dma_rx_probe(struct stm32_port *stm32port,
                                 struct platform_device *pdev)
{
    struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
    struct uart_port *port = &stm32port->port;
    struct device *dev = &pdev->dev;
    struct dma_slave_config config;
    struct dma_async_tx_descriptor *desc = NULL;
    dma_cookie_t cookie;
    int ret;

    /* Request DMA RX channel */
    stm32port->rx_ch = dma_request_slave_channel(dev, "rx");
    if (!stm32port->rx_ch) {
        dev_info(dev, "rx dma alloc failed\n");
        return -ENODEV;
    }
    stm32port->rx_buf = dma_alloc_coherent(&pdev->dev, RX_BUF_L,
                                           &stm32port->rx_dma_buf,
                                           GFP_KERNEL);
    if (!stm32port->rx_buf) {
        ret = -ENOMEM;
        goto alloc_err;
    }

    /* Configure DMA channel */
    memset(&config, 0, sizeof(config));
    config.src_addr = port->mapbase + ofs->rdr;
    config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

    ret = dmaengine_slave_config(stm32port->rx_ch, &config);
    if (ret < 0) {
        dev_err(dev, "rx dma channel config failed\n");
        ret = -ENODEV;
        goto config_err;
    }

    /* Prepare a DMA cyclic transaction */
    desc = dmaengine_prep_dma_cyclic(stm32port->rx_ch,
                                     stm32port->rx_dma_buf,
                                     RX_BUF_L, RX_BUF_P, DMA_DEV_TO_MEM,
                                     DMA_PREP_INTERRUPT);
    if (!desc) {
        dev_err(dev, "rx dma prep cyclic failed\n");
        ret = -ENODEV;
        goto config_err;
    }

    /* No callback as dma buffer is drained on usart interrupt */
    desc->callback = NULL;
    desc->callback_param = NULL;

    /* Push current DMA transaction in the pending queue */
    cookie = dmaengine_submit(desc);

    /* Issue pending DMA requests */
    dma_async_issue_pending(stm32port->rx_ch);

    return 0;

config_err:
    dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
                      stm32port->rx_dma_buf);
alloc_err:
    dma_release_channel(stm32port->rx_ch);
    stm32port->rx_ch = NULL;

    return ret;
}
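/*
 * Hedged sketch: with the cyclic transfer prepared above, the USART
 * interrupt handler can locate the DMA write position by querying the
 * transfer residue. Everything except the dmaengine calls is illustrative,
 * not taken from the stm32-usart driver.
 */
static unsigned int stm32_example_rx_dma_pos(struct stm32_port *stm32port,
                                             dma_cookie_t cookie)
{
    struct dma_tx_state state;

    dmaengine_tx_status(stm32port->rx_ch, cookie, &state);

    /* bytes the device has written into rx_buf so far in this cycle */
    return RX_BUF_L - state.residue;
}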
static int pxa_ata_probe(struct platform_device *pdev)
{
    struct ata_host *host;
    struct ata_port *ap;
    struct pata_pxa_data *data;
    struct resource *cmd_res;
    struct resource *ctl_res;
    struct resource *dma_res;
    struct resource *irq_res;
    struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
    struct dma_slave_config config;
    int ret = 0;

    /*
     * Resource validation, four resources are needed:
     *  - CMD port base address
     *  - CTL port base address
     *  - DMA port base address
     *  - IRQ pin
     */
    if (pdev->num_resources != 4) {
        dev_err(&pdev->dev, "invalid number of resources\n");
        return -EINVAL;
    }

    /*
     * CMD port base address
     */
    cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (unlikely(cmd_res == NULL))
        return -EINVAL;

    /*
     * CTL port base address
     */
    ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
    if (unlikely(ctl_res == NULL))
        return -EINVAL;

    /*
     * DMA port base address
     */
    dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
    if (unlikely(dma_res == NULL))
        return -EINVAL;

    /*
     * IRQ pin
     */
    irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
    if (unlikely(irq_res == NULL))
        return -EINVAL;

    /*
     * Allocate the host
     */
    host = ata_host_alloc(&pdev->dev, 1);
    if (!host)
        return -ENOMEM;

    ap = host->ports[0];
    ap->ops = &pxa_ata_port_ops;
    ap->pio_mask = ATA_PIO4;
    ap->mwdma_mask = ATA_MWDMA2;

    ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
                                       resource_size(cmd_res));
    ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
                                       resource_size(ctl_res));
    ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
                                         resource_size(dma_res));

    /*
     * Adjust register offsets
     */
    ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
    ap->ioaddr.data_addr = ap->ioaddr.cmd_addr +
                           (ATA_REG_DATA << pdata->reg_shift);
    ap->ioaddr.error_addr = ap->ioaddr.cmd_addr +
                            (ATA_REG_ERR << pdata->reg_shift);
    ap->ioaddr.feature_addr = ap->ioaddr.cmd_addr +
                              (ATA_REG_FEATURE << pdata->reg_shift);
    ap->ioaddr.nsect_addr = ap->ioaddr.cmd_addr +
                            (ATA_REG_NSECT << pdata->reg_shift);
    ap->ioaddr.lbal_addr = ap->ioaddr.cmd_addr +
                           (ATA_REG_LBAL << pdata->reg_shift);
    ap->ioaddr.lbam_addr = ap->ioaddr.cmd_addr +
                           (ATA_REG_LBAM << pdata->reg_shift);
    ap->ioaddr.lbah_addr = ap->ioaddr.cmd_addr +
                           (ATA_REG_LBAH << pdata->reg_shift);
    ap->ioaddr.device_addr = ap->ioaddr.cmd_addr +
                             (ATA_REG_DEVICE << pdata->reg_shift);
    ap->ioaddr.status_addr = ap->ioaddr.cmd_addr +
                             (ATA_REG_STATUS << pdata->reg_shift);
    ap->ioaddr.command_addr = ap->ioaddr.cmd_addr +
                              (ATA_REG_CMD << pdata->reg_shift);

    /*
     * Allocate and load driver's internal data structure
     */
    data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
                        GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    ap->private_data = data;

    memset(&config, 0, sizeof(config));
    config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
    config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
    config.src_addr = dma_res->start;
    config.dst_addr = dma_res->start;
    config.src_maxburst = 32;
    config.dst_maxburst = 32;

    /*
     * Request the DMA channel
     */
    data->dma_chan = dma_request_slave_channel(&pdev->dev, "data");
    if (!data->dma_chan)
        return -EBUSY;
    ret = dmaengine_slave_config(data->dma_chan, &config);
    if (ret < 0) {
        dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
        return ret;
    }

    /*
     * Activate the ATA host
     */
    ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
                            pdata->irq_flags, &pxa_ata_sht);
    if (ret)
        dma_release_channel(data->dma_chan);

    return ret;
}
static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
    struct musb *musb = controller->musb;
    struct device *dev = musb->controller;
    struct device_node *np = dev->of_node;
    struct cppi41_dma_channel *cppi41_channel;
    int count;
    int i;
    int ret;

    count = of_property_count_strings(np, "dma-names");
    if (count < 0)
        return count;

    for (i = 0; i < count; i++) {
        struct dma_chan *dc;
        struct dma_channel *musb_dma;
        const char *str;
        unsigned is_tx;
        unsigned int port;

        ret = of_property_read_string_index(np, "dma-names", i, &str);
        if (ret)
            goto err;
        if (!strncmp(str, "tx", 2))
            is_tx = 1;
        else if (!strncmp(str, "rx", 2))
            is_tx = 0;
        else {
            dev_err(dev, "Wrong dmatype %s\n", str);
            goto err;
        }
        ret = kstrtouint(str + 2, 0, &port);
        if (ret)
            goto err;

        ret = -EINVAL;
        if (port > MUSB_DMA_NUM_CHANNELS || !port)
            goto err;
        if (is_tx)
            cppi41_channel = &controller->tx_channel[port - 1];
        else
            cppi41_channel = &controller->rx_channel[port - 1];

        cppi41_channel->controller = controller;
        cppi41_channel->port_num = port;
        cppi41_channel->is_tx = is_tx;

        musb_dma = &cppi41_channel->channel;
        musb_dma->private_data = cppi41_channel;
        musb_dma->status = MUSB_DMA_STATUS_FREE;
        musb_dma->max_len = SZ_4M;

        dc = dma_request_slave_channel(dev, str);
        if (!dc) {
            dev_err(dev, "Failed to request %s.\n", str);
            ret = -EPROBE_DEFER;
            goto err;
        }
        cppi41_channel->dc = dc;
    }
    return 0;
err:
    cppi41_release_all_dma_chans(controller);
    return ret;
}
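/*
 * Hedged alternative sketch for the request step above: dma_request_chan()
 * returns an ERR_PTR rather than NULL, so a genuine -EPROBE_DEFER from the
 * DMA provider can be propagated instead of assumed, as the NULL return
 * from dma_request_slave_channel() forces the code above to do. The helper
 * name is illustrative, not part of the driver.
 */
static struct dma_chan *cppi41_example_request_chan(struct device *dev,
                                                    const char *str,
                                                    int *ret)
{
    struct dma_chan *dc = dma_request_chan(dev, str);

    if (IS_ERR(dc)) {
        *ret = PTR_ERR(dc);
        if (*ret != -EPROBE_DEFER)
            dev_err(dev, "Failed to request %s: %d\n", str, *ret);
        return NULL;
    }
    return dc;
}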
static int sirfsoc_uart_probe(struct platform_device *pdev)
{
    struct sirfsoc_uart_port *sirfport;
    struct uart_port *port;
    struct resource *res;
    int ret;
    int i, j;
    struct dma_slave_config slv_cfg = {
        .src_maxburst = 2,
    };
    struct dma_slave_config tx_slv_cfg = {
        .dst_maxburst = 2,
    };
    const struct of_device_id *match;

    match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
    if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
        dev_err(&pdev->dev,
                "Unable to find cell-index in uart node.\n");
        ret = -EFAULT;
        goto err;
    }
    if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart"))
        pdev->id += ((struct sirfsoc_uart_register *)
                     match->data)->uart_param.register_uart_nr;
    sirfport = &sirfsoc_uart_ports[pdev->id];
    port = &sirfport->port;
    port->dev = &pdev->dev;
    port->private_data = sirfport;
    sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;

    sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
                                                   "sirf,uart-has-rtscts");
    if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart"))
        sirfport->uart_reg->uart_type = SIRF_REAL_UART;
    if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) {
        sirfport->uart_reg->uart_type = SIRF_USP_UART;
        if (!sirfport->hw_flow_ctrl)
            goto usp_no_flow_control;
        if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
            sirfport->cts_gpio =
                of_get_named_gpio(pdev->dev.of_node, "cts-gpios", 0);
        else
            sirfport->cts_gpio = -1;
        if (of_find_property(pdev->dev.of_node, "rts-gpios", NULL))
            sirfport->rts_gpio =
                of_get_named_gpio(pdev->dev.of_node, "rts-gpios", 0);
        else
            sirfport->rts_gpio = -1;
        if ((!gpio_is_valid(sirfport->cts_gpio) ||
             !gpio_is_valid(sirfport->rts_gpio))) {
            ret = -EINVAL;
            dev_err(&pdev->dev,
                    "Usp flow control must have cts and rts gpio");
            goto err;
        }
        ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
                                "usp-cts-gpio");
        if (ret) {
            dev_err(&pdev->dev, "Unable request cts gpio");
            goto err;
        }
        gpio_direction_input(sirfport->cts_gpio);
        ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
                                "usp-rts-gpio");
        if (ret) {
            dev_err(&pdev->dev, "Unable request rts gpio");
            goto err;
        }
        gpio_direction_output(sirfport->rts_gpio, 1);
    }
usp_no_flow_control:
    if (of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-uart"))
        sirfport->is_atlas7 = true;

    if (of_property_read_u32(pdev->dev.of_node, "fifosize",
                             &port->fifosize)) {
        dev_err(&pdev->dev, "Unable to find fifosize in uart node.\n");
        ret = -EFAULT;
        goto err;
    }

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (res == NULL) {
        dev_err(&pdev->dev, "Insufficient resources.\n");
        ret = -EFAULT;
        goto err;
    }
    tasklet_init(&sirfport->rx_dma_complete_tasklet,
                 sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
    tasklet_init(&sirfport->rx_tmo_process_tasklet,
                 sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
    port->mapbase = res->start;
    port->membase = devm_ioremap(&pdev->dev, res->start,
                                 resource_size(res));
    if (!port->membase) {
        dev_err(&pdev->dev, "Cannot remap resource.\n");
        ret = -ENOMEM;
        goto err;
    }
    res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
    if (res == NULL) {
        dev_err(&pdev->dev, "Insufficient resources.\n");
        ret = -EFAULT;
        goto err;
    }
    port->irq = res->start;

    sirfport->clk = devm_clk_get(&pdev->dev, NULL);
    if (IS_ERR(sirfport->clk)) {
        ret = PTR_ERR(sirfport->clk);
        goto err;
    }
    port->uartclk = clk_get_rate(sirfport->clk);
    if (of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-bt-uart")) {
        sirfport->clk_general = devm_clk_get(&pdev->dev, "general");
        if (IS_ERR(sirfport->clk_general)) {
            ret = PTR_ERR(sirfport->clk_general);
            goto err;
        }
        sirfport->clk_noc = devm_clk_get(&pdev->dev, "noc");
        if (IS_ERR(sirfport->clk_noc)) {
            ret = PTR_ERR(sirfport->clk_noc);
            goto err;
        }
        sirfport->is_bt_uart = true;
    } else
        sirfport->is_bt_uart = false;

    port->ops = &sirfsoc_uart_ops;
    spin_lock_init(&port->lock);

    platform_set_drvdata(pdev, sirfport);
    ret = uart_add_one_port(&sirfsoc_uart_drv, port);
    if (ret != 0) {
        dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
        goto err;
    }

    sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
    for (i = 0; sirfport->rx_dma_chan && i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
        sirfport->rx_dma_items[i].xmit.buf =
            dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
                               &sirfport->rx_dma_items[i].dma_addr,
                               GFP_KERNEL);
        if (!sirfport->rx_dma_items[i].xmit.buf) {
            dev_err(port->dev, "Uart alloc bufa failed\n");
            ret = -ENOMEM;
            goto alloc_coherent_err;
        }
        sirfport->rx_dma_items[i].xmit.head =
            sirfport->rx_dma_items[i].xmit.tail = 0;
    }
    if (sirfport->rx_dma_chan)
        dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
    sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx");
    if (sirfport->tx_dma_chan)
        dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);

    return 0;
alloc_coherent_err:
    for (j = 0; j < i; j++)
        dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
                          sirfport->rx_dma_items[j].xmit.buf,
                          sirfport->rx_dma_items[j].dma_addr);
    dma_release_channel(sirfport->rx_dma_chan);
err:
    return ret;
}

static int sirfsoc_uart_remove(struct platform_device *pdev)
{
    struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
    struct uart_port *port = &sirfport->port;

    uart_remove_one_port(&sirfsoc_uart_drv, port);
    if (sirfport->rx_dma_chan) {
        int i;

        dmaengine_terminate_all(sirfport->rx_dma_chan);
        dma_release_channel(sirfport->rx_dma_chan);
        for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
            dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
                              sirfport->rx_dma_items[i].xmit.buf,
                              sirfport->rx_dma_items[i].dma_addr);
    }
    if (sirfport->tx_dma_chan) {
        dmaengine_terminate_all(sirfport->tx_dma_chan);
        dma_release_channel(sirfport->tx_dma_chan);
    }
    return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sirfsoc_uart_suspend(struct device *pdev)
{
    struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
    struct uart_port *port = &sirfport->port;

    uart_suspend_port(&sirfsoc_uart_drv, port);
    return 0;
}

static int sirfsoc_uart_resume(struct device *pdev)
{
    struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
    struct uart_port *port = &sirfport->port;

    uart_resume_port(&sirfsoc_uart_drv, port);
    return 0;
}
#endif

static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
    SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
};

static struct platform_driver sirfsoc_uart_driver = {
    .probe = sirfsoc_uart_probe,
    .remove = sirfsoc_uart_remove,
    .driver = {
        .name = SIRFUART_PORT_NAME,
        .of_match_table = sirfsoc_uart_ids,
        .pm = &sirfsoc_uart_pm_ops,
    },
};

static int __init sirfsoc_uart_init(void)
{
    int ret = 0;

    ret = uart_register_driver(&sirfsoc_uart_drv);
    if (ret)
        goto out;

    ret = platform_driver_register(&sirfsoc_uart_driver);
    if (ret)
        uart_unregister_driver(&sirfsoc_uart_drv);
out:
    return ret;
}
module_init(sirfsoc_uart_init);

static void __exit sirfsoc_uart_exit(void)
{
    platform_driver_unregister(&sirfsoc_uart_driver);
    uart_unregister_driver(&sirfsoc_uart_drv);
}
static int sirfsoc_uart_probe(struct platform_device *pdev)
{
    struct device_node *np = pdev->dev.of_node;
    struct sirfsoc_uart_port *sirfport;
    struct uart_port *port;
    struct resource *res;
    int ret;
    struct dma_slave_config slv_cfg = {
        .src_maxburst = 1,
    };
    struct dma_slave_config tx_slv_cfg = {
        .dst_maxburst = 2,
    };
    const struct of_device_id *match;

    match = of_match_node(sirfsoc_uart_ids, np);
    sirfport = devm_kzalloc(&pdev->dev, sizeof(*sirfport), GFP_KERNEL);
    if (!sirfport) {
        ret = -ENOMEM;
        goto err;
    }
    sirfport->port.line = of_alias_get_id(np, "serial");
    sirf_ports[sirfport->port.line] = sirfport;
    sirfport->port.iotype = UPIO_MEM;
    sirfport->port.flags = UPF_BOOT_AUTOCONF;
    port = &sirfport->port;
    port->dev = &pdev->dev;
    port->private_data = sirfport;
    sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;

    sirfport->hw_flow_ctrl =
        of_property_read_bool(np, "uart-has-rtscts") ||
        of_property_read_bool(np, "sirf,uart-has-rtscts") /* deprecated */;
    if (of_device_is_compatible(np, "sirf,prima2-uart") ||
        of_device_is_compatible(np, "sirf,atlas7-uart"))
        sirfport->uart_reg->uart_type = SIRF_REAL_UART;
    if (of_device_is_compatible(np, "sirf,prima2-usp-uart") ||
        of_device_is_compatible(np, "sirf,atlas7-usp-uart")) {
        sirfport->uart_reg->uart_type = SIRF_USP_UART;
        if (!sirfport->hw_flow_ctrl)
            goto usp_no_flow_control;
        if (of_find_property(np, "cts-gpios", NULL))
            sirfport->cts_gpio =
                of_get_named_gpio(np, "cts-gpios", 0);
        else
            sirfport->cts_gpio = -1;
        if (of_find_property(np, "rts-gpios", NULL))
            sirfport->rts_gpio =
                of_get_named_gpio(np, "rts-gpios", 0);
        else
            sirfport->rts_gpio = -1;
        if ((!gpio_is_valid(sirfport->cts_gpio) ||
             !gpio_is_valid(sirfport->rts_gpio))) {
            ret = -EINVAL;
            dev_err(&pdev->dev,
                    "Usp flow control must have cts and rts gpio");
            goto err;
        }
        ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
                                "usp-cts-gpio");
        if (ret) {
            dev_err(&pdev->dev, "Unable request cts gpio");
            goto err;
        }
        gpio_direction_input(sirfport->cts_gpio);
        ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
                                "usp-rts-gpio");
        if (ret) {
            dev_err(&pdev->dev, "Unable request rts gpio");
            goto err;
        }
        gpio_direction_output(sirfport->rts_gpio, 1);
    }
usp_no_flow_control:
    if (of_device_is_compatible(np, "sirf,atlas7-uart") ||
        of_device_is_compatible(np, "sirf,atlas7-usp-uart"))
        sirfport->is_atlas7 = true;

    if (of_property_read_u32(np, "fifosize", &port->fifosize)) {
        dev_err(&pdev->dev, "Unable to find fifosize in uart node.\n");
        ret = -EFAULT;
        goto err;
    }

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (res == NULL) {
        dev_err(&pdev->dev, "Insufficient resources.\n");
        ret = -EFAULT;
        goto err;
    }
    port->mapbase = res->start;
    port->membase = devm_ioremap(&pdev->dev, res->start,
                                 resource_size(res));
    if (!port->membase) {
        dev_err(&pdev->dev, "Cannot remap resource.\n");
        ret = -ENOMEM;
        goto err;
    }
    res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
    if (res == NULL) {
        dev_err(&pdev->dev, "Insufficient resources.\n");
        ret = -EFAULT;
        goto err;
    }
    port->irq = res->start;

    sirfport->clk = devm_clk_get(&pdev->dev, NULL);
    if (IS_ERR(sirfport->clk)) {
        ret = PTR_ERR(sirfport->clk);
        goto err;
    }
    port->uartclk = clk_get_rate(sirfport->clk);

    port->ops = &sirfsoc_uart_ops;
    spin_lock_init(&port->lock);

    platform_set_drvdata(pdev, sirfport);
    ret = uart_add_one_port(&sirfsoc_uart_drv, port);
    if (ret != 0) {
        dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
        goto err;
    }

    sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
    sirfport->rx_dma_items.xmit.buf =
        dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
                           &sirfport->rx_dma_items.dma_addr, GFP_KERNEL);
    if (!sirfport->rx_dma_items.xmit.buf) {
        dev_err(port->dev, "Uart alloc bufa failed\n");
        ret = -ENOMEM;
        goto alloc_coherent_err;
    }
    sirfport->rx_dma_items.xmit.head = sirfport->rx_dma_items.xmit.tail = 0;
    if (sirfport->rx_dma_chan)
        dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
    sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx");
    if (sirfport->tx_dma_chan)
        dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
    if (sirfport->rx_dma_chan) {
        hrtimer_init(&sirfport->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sirfport->hrt.function = sirfsoc_uart_rx_dma_hrtimer_callback;
        sirfport->is_hrt_enabled = false;
    }

    return 0;
alloc_coherent_err:
    /*
     * This label is only reached when the coherent buffer allocation
     * failed, so there is nothing to dma_free_coherent() here (the
     * original freed the NULL buffer), and the channel request itself
     * may also have failed, so guard the release.
     */
    if (sirfport->rx_dma_chan)
        dma_release_channel(sirfport->rx_dma_chan);
err:
    return ret;
}

static int sirfsoc_uart_remove(struct platform_device *pdev)
{
    struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
    struct uart_port *port = &sirfport->port;

    uart_remove_one_port(&sirfsoc_uart_drv, port);
    if (sirfport->rx_dma_chan) {
        dmaengine_terminate_all(sirfport->rx_dma_chan);
        dma_release_channel(sirfport->rx_dma_chan);
        dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
                          sirfport->rx_dma_items.xmit.buf,
                          sirfport->rx_dma_items.dma_addr);
    }
    if (sirfport->tx_dma_chan) {
        dmaengine_terminate_all(sirfport->tx_dma_chan);
        dma_release_channel(sirfport->tx_dma_chan);
    }
    return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sirfsoc_uart_suspend(struct device *pdev)
{
    struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
    struct uart_port *port = &sirfport->port;

    uart_suspend_port(&sirfsoc_uart_drv, port);
    return 0;
}

static int sirfsoc_uart_resume(struct device *pdev)
{
    struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
    struct uart_port *port = &sirfport->port;

    uart_resume_port(&sirfsoc_uart_drv, port);
    return 0;
}
#endif

static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
    SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
};

static struct platform_driver sirfsoc_uart_driver = {
    .probe = sirfsoc_uart_probe,
    .remove = sirfsoc_uart_remove,
    .driver = {
        .name = SIRFUART_PORT_NAME,
        .of_match_table = sirfsoc_uart_ids,
        .pm = &sirfsoc_uart_pm_ops,
    },
};

static int __init sirfsoc_uart_init(void)
{
    int ret = 0;

    ret = uart_register_driver(&sirfsoc_uart_drv);
    if (ret)
        goto out;

    ret = platform_driver_register(&sirfsoc_uart_driver);
    if (ret)
        uart_unregister_driver(&sirfsoc_uart_drv);
out:
    return ret;
}
module_init(sirfsoc_uart_init);

static void __exit sirfsoc_uart_exit(void)
{
    platform_driver_unregister(&sirfsoc_uart_driver);
    uart_unregister_driver(&sirfsoc_uart_drv);
}
static int bcm2835_sdhost_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct device_node *node = dev->of_node;
    struct clk *clk;
    struct resource *iomem;
    struct bcm2835_host *host;
    struct mmc_host *mmc;
    int ret;

    pr_debug("bcm2835_sdhost_probe\n");
    mmc = mmc_alloc_host(sizeof(*host), dev);
    if (!mmc)
        return -ENOMEM;

    mmc->ops = &bcm2835_sdhost_ops;
    host = mmc_priv(mmc);
    host->mmc = mmc;
    host->pio_timeout = msecs_to_jiffies(500);
    host->max_delay = 1; /* Warn if over 1ms */
    spin_lock_init(&host->lock);

    iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    host->ioaddr = devm_ioremap_resource(dev, iomem);
    if (IS_ERR(host->ioaddr)) {
        ret = PTR_ERR(host->ioaddr);
        goto err;
    }

    host->phys_addr = iomem->start + BCM2835_VCMMU_SHIFT;

    pr_debug(" - ioaddr %lx, iomem->start %lx, phys_addr %lx\n",
             (unsigned long)host->ioaddr,
             (unsigned long)iomem->start,
             (unsigned long)host->phys_addr);

    host->allow_dma = ALLOW_DMA;

    if (node) {
        /* Read any custom properties */
        of_property_read_u32(node, "brcm,delay-after-stop",
                             &host->delay_after_stop);
        of_property_read_u32(node, "brcm,overclock-50",
                             &host->overclock_50);
        of_property_read_u32(node, "brcm,pio-limit",
                             &host->pio_limit);
        host->allow_dma = ALLOW_DMA &&
            !of_property_read_bool(node, "brcm,force-pio");
        host->debug = of_property_read_bool(node, "brcm,debug");
    }

    if (host->allow_dma) {
        if (node) {
            host->dma_chan_tx = dma_request_slave_channel(dev, "tx");
            host->dma_chan_rx = dma_request_slave_channel(dev, "rx");
        } else {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            /* we don't care about the channel, any would work */
            dma_cap_set(DMA_SLAVE, mask);
            host->dma_chan_tx = dma_request_channel(mask, NULL, NULL);
            host->dma_chan_rx = dma_request_channel(mask, NULL, NULL);
        }
    }

    clk = devm_clk_get(dev, NULL);
    if (IS_ERR(clk)) {
        dev_err(dev, "could not get clk\n");
        ret = PTR_ERR(clk);
        goto err;
    }

    host->max_clk = clk_get_rate(clk);

    host->irq = platform_get_irq(pdev, 0);
    if (host->irq <= 0) {
        dev_err(dev, "get IRQ failed\n");
        ret = -EINVAL;
        goto err;
    }

    pr_debug(" - max_clk %lx, irq %d\n",
             (unsigned long)host->max_clk, (int)host->irq);

    if (node)
        mmc_of_parse(mmc);
    else
        mmc->caps |= MMC_CAP_4_BIT_DATA;

    ret = bcm2835_sdhost_add_host(host);
    if (ret)
        goto err;

    platform_set_drvdata(pdev, host);

    pr_debug("bcm2835_sdhost_probe -> OK\n");

    return 0;

err:
    pr_debug("bcm2835_sdhost_probe -> err %d\n", ret);
    mmc_free_host(mmc);

    return ret;
}
static int rockchip_spi_probe(struct platform_device *pdev)
{
    int ret = 0;
    struct rockchip_spi *rs;
    struct spi_master *master;
    struct resource *mem;

    master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
    if (!master)
        return -ENOMEM;

    platform_set_drvdata(pdev, master);

    rs = spi_master_get_devdata(master);
    memset(rs, 0, sizeof(struct rockchip_spi));

    /* Get basic io resource and map it */
    mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    rs->regs = devm_ioremap_resource(&pdev->dev, mem);
    if (IS_ERR(rs->regs)) {
        ret = PTR_ERR(rs->regs);
        goto err_ioremap_resource;
    }

    rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
    if (IS_ERR(rs->apb_pclk)) {
        dev_err(&pdev->dev, "Failed to get apb_pclk\n");
        ret = PTR_ERR(rs->apb_pclk);
        goto err_ioremap_resource;
    }

    rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
    if (IS_ERR(rs->spiclk)) {
        dev_err(&pdev->dev, "Failed to get spi_pclk\n");
        ret = PTR_ERR(rs->spiclk);
        goto err_ioremap_resource;
    }

    ret = clk_prepare_enable(rs->apb_pclk);
    if (ret) {
        dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
        goto err_ioremap_resource;
    }

    ret = clk_prepare_enable(rs->spiclk);
    if (ret) {
        dev_err(&pdev->dev, "Failed to enable spi_clk\n");
        goto err_spiclk_enable;
    }

    spi_enable_chip(rs, 0);

    rs->type = SSI_MOTO_SPI;
    rs->master = master;
    rs->dev = &pdev->dev;
    rs->max_freq = clk_get_rate(rs->spiclk);

    rs->fifo_len = get_fifo_len(rs);
    if (!rs->fifo_len) {
        dev_err(&pdev->dev, "Failed to get fifo length\n");
        ret = -EINVAL;
        goto err_get_fifo_len;
    }

    spin_lock_init(&rs->lock);

    pm_runtime_set_active(&pdev->dev);
    pm_runtime_enable(&pdev->dev);

    master->auto_runtime_pm = true;
    master->bus_num = pdev->id;
    master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
    master->num_chipselect = 2;
    master->dev.of_node = pdev->dev.of_node;
    master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);

    master->set_cs = rockchip_spi_set_cs;
    master->prepare_message = rockchip_spi_prepare_message;
    master->unprepare_message = rockchip_spi_unprepare_message;
    master->transfer_one = rockchip_spi_transfer_one;

    rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx");
    if (!rs->dma_tx.ch)
        dev_warn(rs->dev, "Failed to request TX DMA channel\n");

    rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx");
    if (!rs->dma_rx.ch) {
        if (rs->dma_tx.ch) {
            dma_release_channel(rs->dma_tx.ch);
            rs->dma_tx.ch = NULL;
        }
        dev_warn(rs->dev, "Failed to request RX DMA channel\n");
    }

    if (rs->dma_tx.ch && rs->dma_rx.ch) {
        rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
        rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
        rs->dma_tx.direction = DMA_MEM_TO_DEV;
        rs->dma_rx.direction = DMA_DEV_TO_MEM;

        master->can_dma = rockchip_spi_can_dma;
        master->dma_tx = rs->dma_tx.ch;
        master->dma_rx = rs->dma_rx.ch;
    }

    ret = devm_spi_register_master(&pdev->dev, master);
    if (ret) {
        dev_err(&pdev->dev, "Failed to register master\n");
        goto err_register_master;
    }

    return 0;

err_register_master:
    if (rs->dma_tx.ch)
        dma_release_channel(rs->dma_tx.ch);
    if (rs->dma_rx.ch)
        dma_release_channel(rs->dma_rx.ch);
err_get_fifo_len:
    clk_disable_unprepare(rs->spiclk);
err_spiclk_enable:
    clk_disable_unprepare(rs->apb_pclk);
err_ioremap_resource:
    spi_master_put(master);

    return ret;
}
static int vdmafb_probe(struct platform_device *pdev)
{
    int ret = 0;
    struct vdmafb_dev *fbdev;
    struct resource *res;
    int fbsize;
    struct backlight_properties props;
    struct backlight_device *bl;

    fbdev = devm_kzalloc(&pdev->dev, sizeof(*fbdev), GFP_KERNEL);
    if (!fbdev)
        return -ENOMEM;

    platform_set_drvdata(pdev, fbdev);

    fbdev->info.fbops = &vdmafb_ops;
    fbdev->info.device = &pdev->dev;
    fbdev->info.par = fbdev;

    fbdev->dma_template = devm_kzalloc(&pdev->dev,
                                       sizeof(struct dma_interleaved_template) +
                                       sizeof(struct data_chunk), GFP_KERNEL);
    if (!fbdev->dma_template)
        return -ENOMEM;

    vdmafb_init_var(fbdev, pdev);
    vdmafb_init_fix(fbdev);

    /* Request I/O resource */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res) {
        dev_err(&pdev->dev, "I/O resource request failed\n");
        return -ENXIO;
    }
    res->flags &= ~IORESOURCE_CACHEABLE;
    fbdev->regs = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(fbdev->regs))
        return PTR_ERR(fbdev->regs);

    /* Allocate framebuffer memory */
    fbsize = fbdev->info.fix.smem_len;
    fbdev->fb_virt = dma_alloc_coherent(&pdev->dev, PAGE_ALIGN(fbsize),
                                        &fbdev->fb_phys, GFP_KERNEL);
    if (!fbdev->fb_virt) {
        dev_err(&pdev->dev,
                "Frame buffer memory allocation failed\n");
        return -ENOMEM;
    }
    fbdev->info.fix.smem_start = fbdev->fb_phys;
    fbdev->info.screen_base = fbdev->fb_virt;
    fbdev->info.pseudo_palette = fbdev->pseudo_palette;

    /* %pad prints the dma_addr_t portably (the original used %x) */
    pr_debug("%s virt=%p phys=%pad size=%d\n", __func__,
             fbdev->fb_virt, &fbdev->fb_phys, fbsize);

    /* Clear framebuffer */
    memset_io(fbdev->fb_virt, 0, fbsize);

    fbdev->dma = dma_request_slave_channel(&pdev->dev, "video");
    if (!fbdev->dma) {
        /*
         * dma_request_slave_channel() returns NULL on failure, never
         * an ERR_PTR, so the original PTR_ERR() branch was dead code;
         * defer in the hope that the DMA provider probes later.
         */
        ret = -EPROBE_DEFER;
        dev_err(&pdev->dev, "Failed to allocate DMA channel (%d).\n", ret);
        goto err_dma_free;
    }

    /* Setup and enable the framebuffer */
    vdmafb_setupfb(fbdev);

    ret = fb_alloc_cmap(&fbdev->info.cmap, 256, 0);
    if (ret)
        dev_err(&pdev->dev, "fb_alloc_cmap failed\n");

    /* Register framebuffer */
    ret = register_framebuffer(&fbdev->info);
    if (ret) {
        dev_err(&pdev->dev, "Framebuffer registration failed\n");
        goto err_channel_free;
    }

    /* Register backlight */
    memset(&props, 0, sizeof(struct backlight_properties));
    props.type = BACKLIGHT_RAW;
    props.max_brightness = 1023;
    bl = backlight_device_register("backlight", &pdev->dev, fbdev,
                                   &vdmafb_bl_ops, &props);
    if (IS_ERR(bl)) {
        dev_err(&pdev->dev, "error %ld on backlight register\n",
                PTR_ERR(bl));
    } else {
        fbdev->backlight = bl;
        bl->props.power = FB_BLANK_UNBLANK;
        bl->props.fb_blank = FB_BLANK_UNBLANK;
        bl->props.brightness = vdmafb_bl_get_brightness(bl);
    }

    return 0;

err_channel_free:
    dma_release_channel(fbdev->dma);
err_dma_free:
    dma_free_coherent(&pdev->dev, PAGE_ALIGN(fbsize), fbdev->fb_virt,
                      fbdev->fb_phys);

    return ret;
}
static int ux500_dma_controller_start(struct ux500_dma_controller *controller)
{
    struct ux500_dma_channel *ux500_channel = NULL;
    struct musb *musb = controller->private_data;
    struct device *dev = musb->controller;
    struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
    struct ux500_musb_board_data *data;
    struct dma_channel *dma_channel = NULL;
    char **chan_names;
    u32 ch_num;
    u8 dir;
    u8 is_tx = 0;
    void **param_array;
    struct ux500_dma_channel *channel_array;
    dma_cap_mask_t mask;

    if (!plat) {
        dev_err(musb->controller, "No platform data\n");
        return -EINVAL;
    }

    data = plat->board_data;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);

    /* Prepare the loop for RX channels */
    channel_array = controller->rx_channel;
    param_array = data ? data->dma_rx_param_array : NULL;
    chan_names = (char **)iep_chan_names;

    for (dir = 0; dir < 2; dir++) {
        for (ch_num = 0;
             ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS;
             ch_num++) {
            ux500_channel = &channel_array[ch_num];
            ux500_channel->controller = controller;
            ux500_channel->ch_num = ch_num;
            ux500_channel->is_tx = is_tx;

            dma_channel = &(ux500_channel->channel);
            dma_channel->private_data = ux500_channel;
            dma_channel->status = MUSB_DMA_STATUS_FREE;
            dma_channel->max_len = SZ_16M;

            ux500_channel->dma_chan =
                dma_request_slave_channel(dev, chan_names[ch_num]);

            if (!ux500_channel->dma_chan)
                /*
                 * param_array is NULL when there is no board data,
                 * so guard the dereference (the original indexed it
                 * unconditionally).
                 */
                ux500_channel->dma_chan =
                    dma_request_channel(mask,
                                        data ? data->dma_filter : NULL,
                                        param_array ?
                                        param_array[ch_num] : NULL);

            if (!ux500_channel->dma_chan) {
                ERR("Dma pipe allocation error dir=%d ch=%d\n",
                    dir, ch_num);

                /* Release already allocated channels */
                ux500_dma_controller_stop(controller);

                return -EBUSY;
            }
        }

        /* Prepare the loop for TX channels */
        channel_array = controller->tx_channel;
        param_array = data ? data->dma_tx_param_array : NULL;
        chan_names = (char **)oep_chan_names;
        is_tx = 1;
    }

    return 0;
}
static int mxs_mmc_probe(struct platform_device *pdev)
{
    const struct of_device_id *of_id =
        of_match_device(mxs_mmc_dt_ids, &pdev->dev);
    struct device_node *np = pdev->dev.of_node;
    struct mxs_mmc_host *host;
    struct mmc_host *mmc;
    struct resource *iores;
    int ret = 0, irq_err;
    struct regulator *reg_vmmc;
    struct mxs_ssp *ssp;

    irq_err = platform_get_irq(pdev, 0);
    if (irq_err < 0)
        return irq_err;

    mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
    if (!mmc)
        return -ENOMEM;

    host = mmc_priv(mmc);
    ssp = &host->ssp;
    ssp->dev = &pdev->dev;
    iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    ssp->base = devm_ioremap_resource(&pdev->dev, iores);
    if (IS_ERR(ssp->base)) {
        ret = PTR_ERR(ssp->base);
        goto out_mmc_free;
    }

    ssp->devid = (enum mxs_ssp_id)of_id->data;

    host->mmc = mmc;
    host->sdio_irq_en = 0;

    reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
    if (!IS_ERR(reg_vmmc)) {
        ret = regulator_enable(reg_vmmc);
        if (ret) {
            dev_err(&pdev->dev,
                    "Failed to enable vmmc regulator: %d\n", ret);
            goto out_mmc_free;
        }
    }

    ssp->clk = devm_clk_get(&pdev->dev, NULL);
    if (IS_ERR(ssp->clk)) {
        ret = PTR_ERR(ssp->clk);
        goto out_mmc_free;
    }
    ret = clk_prepare_enable(ssp->clk);
    if (ret)
        goto out_mmc_free;

    ret = mxs_mmc_reset(host);
    if (ret) {
        dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
        goto out_clk_disable;
    }

    ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
    if (!ssp->dmach) {
        dev_err(mmc_dev(host->mmc),
                "%s: failed to request dma\n", __func__);
        ret = -ENODEV;
        goto out_clk_disable;
    }

    /* set mmc core parameters */
    mmc->ops = &mxs_mmc_ops;
    mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
                MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;

    host->broken_cd = of_property_read_bool(np, "broken-cd");

    mmc->f_min = 400000;
    mmc->f_max = 288000000;

    ret = mmc_of_parse(mmc);
    if (ret)
        goto out_clk_disable;

    mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

    mmc->max_segs = 52;
    mmc->max_blk_size = 1 << 0xf;
    mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
    mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
    mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);

    platform_set_drvdata(pdev, mmc);

    ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
                           dev_name(&pdev->dev), host);
    if (ret)
        goto out_free_dma;

    spin_lock_init(&host->lock);

    ret = mmc_add_host(mmc);
    if (ret)
        goto out_free_dma;

    dev_info(mmc_dev(host->mmc), "initialized\n");

    return 0;

out_free_dma:
    dma_release_channel(ssp->dmach);
out_clk_disable:
    clk_disable_unprepare(ssp->clk);
out_mmc_free:
    mmc_free_host(mmc);
    return ret;
}
int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
                  enum v4l2_buf_type type, unsigned int port)
{
    char name[14];
    int ret;

    dma->xdev = xdev;
    dma->port = port;

    dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
    dma->format.pixelformat = dma->fmtinfo->fourcc;
    dma->format.colorspace = V4L2_COLORSPACE_SRGB;
    dma->format.field = V4L2_FIELD_NONE;
    dma->format.width = XVIP_DMA_DEF_WIDTH;
    dma->format.height = XVIP_DMA_DEF_HEIGHT;
    dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
    dma->format.sizeimage = dma->format.bytesperline * dma->format.height;

    /* Initialize the media entity... */
    dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
                   ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;

    ret = media_entity_init(&dma->video.entity, 1, &dma->pad, 0);
    if (ret < 0)
        return ret;

    mutex_init(&dma->lock);
    mutex_init(&dma->pipe.lock);

    /* ... and the video node... */
    dma->video.v4l2_dev = &xdev->v4l2_dev;
    dma->video.fops = &xvip_dma_fops;
    snprintf(dma->video.name, sizeof(dma->video.name), "%s %s %u",
             xdev->dev->of_node->name,
             type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
             port);
    dma->video.vfl_type = VFL_TYPE_GRABBER;
    dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
                       ? VFL_DIR_RX : VFL_DIR_TX;
    dma->video.release = video_device_release_empty;
    dma->video.ioctl_ops = &xvip_dma_ioctl_ops;

    video_set_drvdata(&dma->video, dma);

    /* ... and the buffers queue... */
    dma->alloc_ctx = vb2_dma_contig_init_ctx(dma->xdev->dev);
    if (IS_ERR(dma->alloc_ctx)) {
        /* propagate the error instead of returning 0 as before */
        ret = PTR_ERR(dma->alloc_ctx);
        goto error;
    }

    dma->queue.type = type;
    dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
    dma->queue.drv_priv = dma;
    dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
    dma->queue.ops = &xvip_dma_queue_qops;
    dma->queue.mem_ops = &vb2_dma_contig_memops;
    dma->queue.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
    ret = vb2_queue_init(&dma->queue);
    if (ret < 0) {
        dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
        goto error;
    }

    /* ... and the DMA channel. */
    /* snprintf keeps a large port number from overflowing name[14] */
    snprintf(name, sizeof(name), "port%u", port);
    dma->dma = dma_request_slave_channel(dma->xdev->dev, name);
    if (dma->dma == NULL) {
        dev_err(dma->xdev->dev, "no VDMA channel found\n");
        ret = -ENODEV;
        goto error;
    }

    dma->align = 1 << dma->dma->device->copy_align;

    ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
    if (ret < 0) {
        dev_err(dma->xdev->dev, "failed to register video device\n");
        goto error;
    }

    return 0;

error:
    xvip_dma_cleanup(dma);
    return ret;
}
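/*
 * Hedged sketch of the pattern the snippets above share: request a named
 * slave channel, apply a dma_slave_config, and treat failure as "fall back
 * to PIO". The helper name, FIFO address, and bus width are placeholders,
 * not taken from any driver in this collection.
 */
static struct dma_chan *example_request_tx_chan(struct device *dev,
                                                dma_addr_t fifo_addr)
{
    struct dma_slave_config cfg = {
        .direction = DMA_MEM_TO_DEV,
        .dst_addr = fifo_addr,
        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
    };
    struct dma_chan *chan = dma_request_slave_channel(dev, "tx");

    if (!chan)
        return NULL; /* caller falls back to PIO */

    if (dmaengine_slave_config(chan, &cfg)) {
        dma_release_channel(chan);
        return NULL;
    }
    return chan;
}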