/*
 * Complete the current MMC request: drop any DMA mapping this driver
 * created for the data phase, then hand the request back to the core.
 */
static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
{
	struct mmc_request *req = host->req;
	struct mmc_data *data = req->data;

	host->req = NULL;

	/* Only unmap buffers mapped by this driver (cookie == COOKIE_MAPPED);
	 * pre-mapped buffers are released in the post_req hook instead. */
	if (data && data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);

	mmc_request_done(host->mmc, req);
}
static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host, struct mmc_data *data) { int ret; struct dma_chan *chan; struct dma_async_tx_descriptor *desc; struct dma_slave_config conf = { .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, .src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE, .dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE, }; if (data->flags & MMC_DATA_WRITE) { conf.direction = DMA_MEM_TO_DEV; conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO; conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT; chan = host->dma_tx; } else { conf.direction = DMA_DEV_TO_MEM; conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO; conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE; chan = host->dma_rx; } ret = jz4740_mmc_prepare_dma_data(host, data, NULL, chan); if (ret) return ret; dmaengine_slave_config(chan, &conf); desc = dmaengine_prep_slave_sg(chan, data->sg, host->sg_len, conf.direction, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) { dev_err(mmc_dev(host->mmc), "Failed to allocate DMA %s descriptor", conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX"); goto dma_unmap; } dmaengine_submit(desc); dma_async_issue_pending(chan); return 0; dma_unmap: jz4740_mmc_dma_unmap(host, data); return -ENOMEM; }
/*
 * jz4740_mmc_post_request - MMC core post_req hook
 * @mmc: MMC host
 * @mrq: completed (or failed) request
 * @err: nonzero if the request is being torn down on error
 *
 * Releases the DMA mapping for a data request that was mapped earlier
 * (any cookie state other than COOKIE_UNMAPPED) and, on error, aborts
 * the in-flight DMA on the channel used for the transfer.
 */
static void jz4740_mmc_post_request(struct mmc_host *mmc,
				    struct mmc_request *mrq, int err)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data && data->host_cookie != COOKIE_UNMAPPED)
		jz4740_mmc_dma_unmap(host, data);

	/*
	 * Fix: guard against mrq->data == NULL. The original passed a NULL
	 * data pointer into jz4740_mmc_get_dma_chan() on the error path,
	 * and the channel is selected from data's direction flags — a NULL
	 * dereference for requests without a data phase.
	 */
	if (err && data) {
		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);

		dmaengine_terminate_all(chan);
	}
}