static void mmci_dma_setup(struct mmci_host *host)
{
	const char *rxname, *txname;

	host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
	host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (host->dma_rx_channel && !host->dma_tx_channel)
		host->dma_tx_channel = host->dma_rx_channel;

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	if (host->ops && host->ops->dma_setup)
		host->ops->dma_setup(host);
}
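/*
 * A minimal sketch only: the two identical segment-size clamps in
 * mmci_dma_setup() could be factored into a helper such as the one
 * below. mmci_clamp_max_seg_size() is an illustrative name and not an
 * existing mmci function.
 */
static void mmci_clamp_max_seg_size(struct mmci_host *host,
				    struct dma_chan *chan)
{
	unsigned int max_seg_size;

	if (!chan)
		return;

	/* Never let an SG entry exceed what the DMA engine can handle. */
	max_seg_size = dma_get_max_seg_size(chan->device->dev);
	if (max_seg_size < host->mmc->max_seg_size)
		host->mmc->max_seg_size = max_seg_size;
}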
static bool dmatest_match_channel(struct dmatest_params *params,
				  struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}
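/*
 * A minimal sketch, not part of dmatest: the same name test can back a
 * dmaengine filter function passed to dma_request_channel(). The
 * helper name example_name_filter and its wanted-name parameter are
 * illustrative assumptions.
 */
static bool example_name_filter(struct dma_chan *chan, void *param)
{
	const char *wanted = param;

	/* As in dmatest_match_channel(), an empty name matches anything. */
	if (!wanted || wanted[0] == '\0')
		return true;

	return strcmp(dma_chan_name(chan), wanted) == 0;
}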
/*
 * omap3isp_hist_init - Module Initialization.
 */
int omap3isp_hist_init(struct isp_device *isp)
{
	struct ispstat *hist = &isp->isp_hist;
	struct omap3isp_hist_config *hist_cfg;
	int ret = -1;

	hist_cfg = devm_kzalloc(isp->dev, sizeof(*hist_cfg), GFP_KERNEL);
	if (hist_cfg == NULL)
		return -ENOMEM;

	hist->isp = isp;

	if (HIST_CONFIG_DMA) {
		struct platform_device *pdev = to_platform_device(isp->dev);
		struct resource *res;
		unsigned int sig = 0;
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
						   "hist");
		if (res)
			sig = res->start;

		hist->dma_ch = dma_request_slave_channel_compat_reason(mask,
				omap_dma_filter_fn, &sig, isp->dev, "hist");
		if (IS_ERR(hist->dma_ch)) {
			ret = PTR_ERR(hist->dma_ch);
			if (ret == -EPROBE_DEFER)
				return ret;

			hist->dma_ch = NULL;
			dev_warn(isp->dev,
				 "hist: DMA channel request failed, using PIO\n");
		} else {
			dev_dbg(isp->dev, "hist: using DMA channel %s\n",
				dma_chan_name(hist->dma_ch));
		}
	}

	hist->ops = &hist_ops;
	hist->priv = hist_cfg;
	hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;

	ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
	if (ret) {
		if (hist->dma_ch)
			dma_release_channel(hist->dma_ch);
	}

	return ret;
}
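/*
 * A minimal sketch: the defer-then-PIO fallback used above, restated
 * against the plain dma_request_chan() interface for a driver that
 * only needs the named-channel lookup. example_request_dma_or_pio()
 * is an illustrative name, not an omap3isp function.
 */
static struct dma_chan *example_request_dma_or_pio(struct device *dev)
{
	struct dma_chan *chan = dma_request_chan(dev, "hist");

	if (IS_ERR(chan)) {
		/* Propagate deferral so the probe is retried later. */
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;
		dev_warn(dev, "DMA channel unavailable, using PIO\n");
		return NULL;
	}

	return chan;
}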
static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	int ret;

	ret = request_irq(tdc->irq, tegra_adma_isr, 0, dma_chan_name(dc), tdc);
	if (ret) {
		dev_err(tdc2dev(tdc), "failed to get interrupt for %s\n",
			dma_chan_name(dc));
		return ret;
	}

	ret = pm_runtime_get_sync(tdc2dev(tdc));
	if (ret < 0) {
		free_irq(tdc->irq, tdc);
		return ret;
	}

	dma_cookie_init(&tdc->vc.chan);

	return 0;
}
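/*
 * A minimal counterpart sketch: a teardown that balances the
 * allocation above. tegra_adma_example_free_chan_resources() is an
 * illustrative name; a real free path would also have to stop the
 * channel and reap any outstanding descriptors.
 */
static void tegra_adma_example_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);

	/* Balance pm_runtime_get_sync() and release the channel IRQ. */
	pm_runtime_put(tdc2dev(tdc));
	free_irq(tdc->irq, tdc);
}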
static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
				    enum dma_transfer_direction direction)
{
	struct tegra_adma *tdma = tdc->tdma;
	unsigned int sreq_index = tdc->sreq_index;

	if (tdc->sreq_reserved)
		return tdc->sreq_dir == direction ? 0 : -EINVAL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		if (sreq_index > ADMA_CH_CTRL_TX_REQ_MAX) {
			dev_err(tdma->dev, "invalid DMA request\n");
			return -EINVAL;
		}

		if (test_and_set_bit(sreq_index, &tdma->tx_requests_reserved)) {
			dev_err(tdma->dev, "DMA request reserved\n");
			return -EINVAL;
		}
		break;

	case DMA_DEV_TO_MEM:
		if (sreq_index > ADMA_CH_CTRL_RX_REQ_MAX) {
			dev_err(tdma->dev, "invalid DMA request\n");
			return -EINVAL;
		}

		if (test_and_set_bit(sreq_index, &tdma->rx_requests_reserved)) {
			dev_err(tdma->dev, "DMA request reserved\n");
			return -EINVAL;
		}
		break;

	default:
		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
			 dma_chan_name(&tdc->vc.chan));
		return -EINVAL;
	}

	tdc->sreq_dir = direction;
	tdc->sreq_reserved = true;

	return 0;
}
static void tegra_adma_request_free(struct tegra_adma_chan *tdc)
{
	struct tegra_adma *tdma = tdc->tdma;

	if (!tdc->sreq_reserved)
		return;

	switch (tdc->sreq_dir) {
	case DMA_MEM_TO_DEV:
		clear_bit(tdc->sreq_index, &tdma->tx_requests_reserved);
		break;

	case DMA_DEV_TO_MEM:
		clear_bit(tdc->sreq_index, &tdma->rx_requests_reserved);
		break;

	default:
		dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
			 dma_chan_name(&tdc->vc.chan));
		return;
	}

	tdc->sreq_reserved = false;
}
static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	/*
	 * The actual width of the access will be chosen in
	 * dmaengine_prep_slave_sg():
	 * for each buffer in the scatter-gather list, if its size is aligned
	 * to addr_width then addr_width accesses will be performed to transfer
	 * the buffer. On the other hand, if the buffer size is not aligned to
	 * addr_width then the buffer is transferred using single byte accesses.
	 * Please refer to the Atmel eXtended DMA controller driver.
	 * When FIFOs are used, the TXRDYM threshold can always be set to
	 * trigger the XDMAC when at least 4 data can be written into the TX
	 * FIFO, even if single byte accesses are performed.
	 * However the RXRDYM threshold must be set to fit the access width,
	 * deduced from buffer length, so the XDMAC is triggered properly to
	 * read data from the RX FIFO.
	 */
	if (dev->fifo_size)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	memset(&slave_config, 0, sizeof(slave_config));
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = addr_width;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = addr_width;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	dma->chan_tx = dma_request_slave_channel_reason(dev->dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		dma->chan_tx = NULL;
		goto error;
	}

	dma->chan_rx = dma_request_slave_channel_reason(dev->dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		dma->chan_rx = NULL;
		goto error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}

	sg_init_table(dma->sg, 2);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;
	dev->use_dma = true;

	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));

	return ret;

error:
	if (ret != -EPROBE_DEFER)
		dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}
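/*
 * A minimal sketch, not from the i2c-at91 driver: once chan_tx has
 * been configured by at91_twi_configure_dma(), a transmit would be
 * queued roughly as below. example_twi_dma_write() is an illustrative
 * name, and dma->sg is assumed to be already mapped with dma_map_sg().
 */
static int example_twi_dma_write(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(dma->chan_tx, dma->sg, 1,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	/* Queue the descriptor and kick the engine. */
	dmaengine_submit(desc);
	dma_async_issue_pending(dma->chan_tx);

	return 0;
}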