static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	struct st_fdma_hw_node *hw_node;

	if (!len)
		return NULL;

	fchan = to_st_fdma_chan(chan);

	/* We only require a single descriptor */
	fdesc = st_fdma_alloc_desc(fchan, 1);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	hw_node = fdesc->node[0].desc;
	hw_node->next = 0;
	hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN;
	hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
	hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
	hw_node->control |= FDMA_NODE_CTRL_INT_EON;
	hw_node->nbytes = len;
	hw_node->saddr = src;
	hw_node->daddr = dst;
	hw_node->generic.length = len;
	hw_node->generic.sstride = 0;
	hw_node->generic.dstride = 0;

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}
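/*
 * Illustrative sketch, not part of this driver: a client normally reaches
 * the callback above through the generic dmaengine wrapper rather than by
 * calling it directly.  example_memcpy() is a hypothetical helper; it
 * assumes a channel already obtained via dma_request_chan() and src/dst
 * addresses that have already been DMA-mapped.
 */
#include <linux/dmaengine.h>

static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Dispatches to the driver's device_prep_dma_memcpy callback. */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Nothing runs until the pending queue is kicked. */
	dma_async_issue_pending(chan);
	return 0;
}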
static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct st_fdma_chan *fchan;
	struct st_fdma_desc *fdesc;
	int sg_len, i;

	fchan = st_fdma_prep_common(chan, len, direction);
	if (!fchan)
		return NULL;

	if (!period_len)
		return NULL;

	if (config_reqctrl(fchan, direction)) {
		dev_err(fchan->fdev->dev, "bad width or direction\n");
		return NULL;
	}

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0) {
		dev_err(fchan->fdev->dev, "len is not multiple of period\n");
		return NULL;
	}

	sg_len = len / period_len;
	fdesc = st_fdma_alloc_desc(fchan, sg_len);
	if (!fdesc) {
		dev_err(fchan->fdev->dev, "no memory for desc\n");
		return NULL;
	}

	fdesc->iscyclic = true;

	for (i = 0; i < sg_len; i++) {
		struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;

		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;

		hw_node->control =
			FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
		hw_node->control |= FDMA_NODE_CTRL_INT_EON;

		fill_hw_node(hw_node, fchan, direction);

		if (direction == DMA_MEM_TO_DEV)
			hw_node->saddr = buf_addr + (i * period_len);
		else
			hw_node->daddr = buf_addr + (i * period_len);

		hw_node->nbytes = period_len;
		hw_node->generic.length = period_len;
	}

	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}
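/*
 * Note on the (i + 1) % sg_len link above: the modulo makes the last
 * node point back at the first, so the node list forms a ring:
 *
 *   node[0] -> node[1] -> ... -> node[sg_len - 1]
 *      ^                                 |
 *      +---------------------------------+
 *
 * The FDMA therefore re-runs the chain indefinitely, and because every
 * node sets FDMA_NODE_CTRL_INT_EON and covers exactly one period, an
 * interrupt fires once per period.
 */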
static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys;

	if (!len)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->list_xfer_size = len;

	while (len > 0) {
		size_t xfer_size;

		curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
				      &curr_phys);
		if (!curr)
			goto free_desc;

		if (prev) {
			prev->node_addr = curr_phys;
			prev->next_desc = curr;
		} else {
			mdesc->list_phys = curr_phys;
			mdesc->list = curr;
		}

		xfer_size = min_t(size_t, mdma->max_xfer_size, len);

		mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
				     xfer_size);

		prev = curr;
		mdesc->list_len++;
		src += xfer_size;
		dest += xfer_size;
		len -= xfer_size;
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}
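/*
 * Worked example of the chunking loop above (numbers are illustrative
 * only; the real bound comes from mdma->max_xfer_size): with a 64 KiB
 * max_xfer_size and len == 200 KiB, the loop emits four list nodes of
 * 64K + 64K + 64K + 8K, so mdesc->list_len ends up as
 * DIV_ROUND_UP(len, mdma->max_xfer_size) == 4.  src and dest advance
 * by xfer_size on each pass, so the chunks cover the buffers
 * contiguously.
 */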
static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(fsl_chan->fsc.dir))
		return NULL;

	sg_len = buf_len / period_len;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;

	dma_buf_next = dma_addr;
	nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->fsc.dev_addr;
			soff = fsl_chan->fsc.addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->fsc.dev_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->fsc.addr_width;
		}

		fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
				src_addr, dst_addr, fsl_chan->fsc.attr,
				soff, nbytes, 0, iter, iter, doff, last_sg,
				true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
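/*
 * Worked example of the iteration math above (illustrative numbers):
 * an addr_width of 4 bytes and a burst of 8 give nbytes == 32 per
 * minor loop, so a 4096-byte period yields iter == 4096 / 32 == 128
 * major-loop iterations per TCD.  last_sg then chains each TCD to the
 * one at (i + 1) % sg_len, wrapping the ring for cyclic operation.
 */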
static struct dma_async_tx_descriptor *tegra_adma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
	struct tegra_adma_desc *desc = NULL;

	if (!buf_len || !period_len || period_len > ADMA_CH_TC_COUNT_MASK) {
		dev_err(tdc2dev(tdc), "invalid buffer/period len\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len not a multiple of period_len\n");
		return NULL;
	}

	if (!IS_ALIGNED(buf_addr, 4)) {
		dev_err(tdc2dev(tdc), "invalid buffer alignment\n");
		return NULL;
	}

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->buf_len = buf_len;
	desc->period_len = period_len;
	desc->num_periods = buf_len / period_len;

	if (tegra_adma_set_xfer_params(tdc, desc, buf_addr, direction)) {
		kfree(desc);
		return NULL;
	}

	return vchan_tx_prep(&tdc->vc, &desc->vd, flags);
}
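/*
 * Illustrative sketch, not part of this driver: how an audio-style
 * client would typically drive a cyclic channel through the generic
 * dmaengine API.  example_start_cyclic() and example_period_done() are
 * hypothetical names; the channel is assumed to have been configured
 * with dmaengine_slave_config() beforehand.
 */
#include <linux/dmaengine.h>

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				void (*example_period_done)(void *),
				void *arg)
{
	struct dma_async_tx_descriptor *tx;

	/* Dispatches to the driver's device_prep_dma_cyclic callback. */
	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	/* For cyclic transfers the callback typically runs once per period. */
	tx->callback = example_period_done;
	tx->callback_param = arg;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}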
static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys;

	/*
	 * Reject a zero length *or* a zero period: a zero buf_len would
	 * skip the loops and leave prev NULL for the final ring-closing
	 * store, and a zero period_len would make remainder zero and the
	 * outer loop spin forever.
	 */
	if (!buf_len || !period_len)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->cyclic = true;
	mdesc->list_xfer_size = buf_len;
	mdesc->list_period_len = DIV_ROUND_UP(period_len,
					      mdma->max_xfer_size);

	while (buf_len > 0) {
		size_t remainder = min(period_len, buf_len);

		while (remainder > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  remainder);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir,
						     buf_addr,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf_addr,
						     xfer_size);
			}

			prev = curr;
			mdesc->list_len++;
			buf_addr += xfer_size;
			buf_len -= xfer_size;
			remainder -= xfer_size;
		}
	}
	prev->node_addr = mdesc->list_phys;

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}
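/*
 * Note on the final prev->node_addr store above: once the loops have
 * emitted every chunk, prev points at the last hardware list node, and
 * aiming its node_addr back at mdesc->list_phys closes the chain into
 * a ring.  That back-link is what makes the transfer cyclic, in
 * contrast to mdc_prep_dma_memcpy() above, whose list is left
 * unterminated.
 */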