static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); struct dma_slave_config *dmaengine_cfg = (void *)arg; int ret = 0; switch (cmd) { case DMA_TERMINATE_ALL: mmp_tdma_disable_chan(tdmac); break; case DMA_PAUSE: mmp_tdma_pause_chan(tdmac); break; case DMA_RESUME: mmp_tdma_resume_chan(tdmac); break; case DMA_SLAVE_CONFIG: if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { tdmac->dev_addr = dmaengine_cfg->src_addr; tdmac->burst_sz = dmaengine_cfg->src_maxburst; tdmac->buswidth = dmaengine_cfg->src_addr_width; } else { tdmac->dev_addr = dmaengine_cfg->dst_addr; tdmac->burst_sz = dmaengine_cfg->dst_maxburst; tdmac->buswidth = dmaengine_cfg->dst_addr_width; } tdmac->dir = dmaengine_cfg->direction; return mmp_tdma_config_chan(tdmac); default: ret = -ENOSYS; } return ret; }
/* tx_submit hook: point the controller at the head of the descriptor ring. */
static dma_cookie_t mmp_tdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan);

	mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys);

	/* No cookie bookkeeping is performed here; always report 0. */
	return 0;
}
/*
 * Prepare a cyclic (ring) transfer: carve buf_len into num_periods
 * descriptors whose last entry chains back to the first.
 *
 * Fixes vs. original: the dev_err() format used %d for the size_t
 * period_len, which is a format/argument mismatch (undefined behavior
 * per the kernel printk format rules); it now uses %zu.  A zero
 * period_len is also rejected up front instead of dividing by zero.
 *
 * Returns the channel's reusable descriptor, or NULL on error (with
 * tdmac->status set to DMA_ERROR).
 */
static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	struct mmp_tdma_desc *desc;
	int num_periods;
	int i = 0, buf = 0;

	if (tdmac->status != DMA_SUCCESS)
		return NULL;

	/* Guard the division below and reject nonsensical requests. */
	if (!period_len) {
		dev_err(tdmac->dev, "period size must not be zero\n");
		goto err_out;
	}

	if (period_len > TDMA_MAX_XFER_BYTES) {
		dev_err(tdmac->dev,
			"maximum period size exceeded: %zu > %d\n",
			period_len, TDMA_MAX_XFER_BYTES);
		goto err_out;
	}

	num_periods = buf_len / period_len;

	tdmac->status = DMA_IN_PROGRESS;
	tdmac->desc_num = num_periods;
	desc = mmp_tdma_alloc_descriptor(tdmac);
	if (!desc)
		goto err_out;

	while (buf < buf_len) {
		desc = &tdmac->desc_arr[i];

		/* Last descriptor loops back to the head to form the ring. */
		if (i + 1 == num_periods)
			desc->nxt_desc = tdmac->desc_arr_phys;
		else
			desc->nxt_desc = tdmac->desc_arr_phys +
					 sizeof(*desc) * (i + 1);

		if (direction == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr;
			desc->dst_addr = tdmac->dev_addr;
		} else {
			desc->src_addr = tdmac->dev_addr;
			desc->dst_addr = dma_addr;
		}
		desc->byte_cnt = period_len;
		dma_addr += period_len;
		buf += period_len;
		i++;
	}

	tdmac->buf_len = buf_len;
	tdmac->period_len = period_len;
	tdmac->pos = 0;

	return &tdmac->desc;

err_out:
	tdmac->status = DMA_ERROR;
	return NULL;
}
/*
 * Report channel status and the bytes still outstanding in the cyclic
 * buffer.
 *
 * NOTE(review): a second, different definition of mmp_tdma_tx_status()
 * appears later in this file (it refreshes tdmac->pos from hardware
 * first); only one of the two can be linked — confirm which is intended.
 */
static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	size_t residue = tdmac->buf_len - tdmac->pos;

	dma_set_residue(txstate, residue);

	return tdmac->status;
}
/* Release the channel: detach its interrupt handler and free descriptors. */
static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	if (tdmac->irq)
		devm_free_irq(tdmac->dev, tdmac->irq, tdmac);
	mmp_tdma_free_descriptor(tdmac);
}
/* dmaengine terminate_all hook: stop the channel and mask its interrupt. */
static int mmp_tdma_terminate_all(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);

	mmp_tdma_disable_chan(chan);
	/* No further completions are wanted once terminated. */
	mmp_tdma_enable_irq(tdmac, false);

	return 0;
}
static int mmp_tdma_pause_chan(struct dma_chan *chan) { struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, tdmac->reg_base + TDCR); tdmac->status = DMA_PAUSED; return 0; }
static int mmp_tdma_resume_chan(struct dma_chan *chan) { struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, tdmac->reg_base + TDCR); tdmac->status = DMA_IN_PROGRESS; return 0; }
/*
 * Report channel status; refresh the hardware position first so the
 * residue reflects the controller's actual progress.
 *
 * NOTE(review): an earlier, different definition of mmp_tdma_tx_status()
 * also exists in this file — only one of the two can be linked; confirm
 * which is intended.
 */
static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	size_t residue;

	tdmac->pos = mmp_tdma_get_pos(tdmac);
	residue = tdmac->buf_len - tdmac->pos;
	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			 residue);

	return tdmac->status;
}
/*
 * dma_request_channel() filter: match a channel by its owning device's
 * OF node and its channel id.
 */
static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct mmp_tdma_filter_param *param = fn_param;
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	struct dma_device *pdma_device = tdmac->chan.device;

	return pdma_device->dev->of_node == param->of_node &&
	       chan->chan_id == param->chan_id;
}
/* Abort any transfer in flight and disable the channel. */
static int mmp_tdma_disable_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	u32 tdcr = readl(tdmac->reg_base + TDCR);

	/* Request an abort and drop the enable bit in a single write. */
	tdcr = (tdcr | TDCR_ABR) & ~TDCR_CHANEN;
	writel(tdcr, tdmac->reg_base + TDCR);

	tdmac->status = DMA_COMPLETE;

	return 0;
}
/*
 * Allocate per-channel resources: initialize the reusable descriptor and
 * hook up the channel interrupt.
 *
 * Fix vs. original: IRQF_DISABLED has been a no-op since 2.6.35 and was
 * later removed from the kernel entirely; request the IRQ with no special
 * flags instead.
 *
 * Returns 1 (the number of descriptors made available) on success, or a
 * negative errno if the interrupt could not be requested.
 */
static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	int ret;

	dma_async_tx_descriptor_init(&tdmac->desc, chan);
	tdmac->desc.tx_submit = mmp_tdma_tx_submit;

	if (tdmac->irq) {
		ret = devm_request_irq(tdmac->dev, tdmac->irq,
			mmp_tdma_chan_handler, 0, "tdma", tdmac);
		if (ret)
			return ret;
	}
	return 1;
}
static int mmp_tdma_config(struct dma_chan *chan, struct dma_slave_config *dmaengine_cfg) { struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { tdmac->dev_addr = dmaengine_cfg->src_addr; tdmac->burst_sz = dmaengine_cfg->src_maxburst; tdmac->buswidth = dmaengine_cfg->src_addr_width; } else { tdmac->dev_addr = dmaengine_cfg->dst_addr; tdmac->burst_sz = dmaengine_cfg->dst_maxburst; tdmac->buswidth = dmaengine_cfg->dst_addr_width; } tdmac->dir = dmaengine_cfg->direction; return mmp_tdma_config_chan(chan); }
/* dmaengine issue_pending hook: kick the hardware for this channel. */
static void mmp_tdma_issue_pending(struct dma_chan *chan)
{
	mmp_tdma_enable_chan(to_mmp_tdma_chan(chan));
}
/*
 * Program TDCR for the channel from the cached slave parameters
 * (direction, burst size, bus width).
 *
 * Returns 0 on success, or -EINVAL for an unsupported burst size or
 * bus width.
 */
static int mmp_tdma_config_chan(struct dma_chan *chan)
{
	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
	unsigned int tdcr = 0;

	/* Stop the channel before changing its configuration. */
	mmp_tdma_disable_chan(chan);

	/* Device-side address is held; memory-side address increments. */
	if (tdmac->dir == DMA_MEM_TO_DEV)
		tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
	else if (tdmac->dir == DMA_DEV_TO_MEM)
		tdcr = TDCR_SRCDIR_ADDR_HOLD | TDCR_DSTDIR_ADDR_INC;

	if (tdmac->type == MMP_AUD_TDMA) {
		/* Audio DMA: packed mode with explicit burst and sample size. */
		tdcr |= TDCR_PACKMOD;

		switch (tdmac->burst_sz) {
		case 4:
			tdcr |= TDCR_BURSTSZ_4B;
			break;
		case 8:
			tdcr |= TDCR_BURSTSZ_8B;
			break;
		case 16:
			tdcr |= TDCR_BURSTSZ_16B;
			break;
		case 32:
			tdcr |= TDCR_BURSTSZ_32B;
			break;
		case 64:
			tdcr |= TDCR_BURSTSZ_64B;
			break;
		case 128:
			tdcr |= TDCR_BURSTSZ_128B;
			break;
		default:
			dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
			return -EINVAL;
		}

		switch (tdmac->buswidth) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			tdcr |= TDCR_SSZ_8_BITS;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			tdcr |= TDCR_SSZ_16_BITS;
			break;
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			tdcr |= TDCR_SSZ_32_BITS;
			break;
		default:
			dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n");
			return -EINVAL;
		}
	} else if (tdmac->type == PXA910_SQU) {
		/* SQU: SSP mode with its own burst-size encoding. */
		tdcr |= TDCR_SSPMOD;

		switch (tdmac->burst_sz) {
		case 1:
			tdcr |= TDCR_BURSTSZ_SQU_1B;
			break;
		case 2:
			tdcr |= TDCR_BURSTSZ_SQU_2B;
			break;
		case 4:
			tdcr |= TDCR_BURSTSZ_SQU_4B;
			break;
		case 8:
			tdcr |= TDCR_BURSTSZ_SQU_8B;
			break;
		case 16:
			tdcr |= TDCR_BURSTSZ_SQU_16B;
			break;
		case 32:
			tdcr |= TDCR_BURSTSZ_SQU_32B;
			break;
		default:
			dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
			return -EINVAL;
		}
	}

	writel(tdcr, tdmac->reg_base + TDCR);
	return 0;
}