static void snd_imx_dma_err_callback(int channel, void *data, int err)
{
	struct snd_pcm_substream *substream = data;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct imx_pcm_dma_params *dma_params =
		snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
	int ret;

	pr_err("DMA timeout on channel %d -%s%s%s%s\n",
		channel,
		err & IMX_DMA_ERR_BURST ?    " burst" : "",
		err & IMX_DMA_ERR_REQUEST ?  " request" : "",
		err & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		err & IMX_DMA_ERR_BUFFER ?   " buffer" : "");

	/*
	 * Recover from the error: stop the channel, re-arm the looping
	 * scatter-gather transfer and restart it only if the setup succeeded.
	 */
	imx_dma_disable(iprtd->dma);

	ret = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count,
			IMX_DMA_LENGTH_LOOP, dma_params->dma_addr,
			substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
			DMA_MODE_WRITE : DMA_MODE_READ);
	if (!ret)
		imx_dma_enable(iprtd->dma);
}
static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		imx_dma_enable(iprtd->dma);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		imx_dma_disable(iprtd->dma);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
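/*
 * For context: a minimal sketch of how a PCM driver like the one above would
 * typically claim its DMA channel and register snd_imx_dma_err_callback()
 * before snd_imx_pcm_trigger() ever enables the channel. The helpers
 * imx_dma_request_by_prio(), imx_dma_setup_handlers(), imx_dma_free() and the
 * DMA_PRIO_HIGH priority are assumptions based on the legacy i.MX "dma-v1"
 * API; the function and handler names here are hypothetical and not taken
 * from the snippets above.
 */
static void snd_imx_pcm_dma_irq(int channel, void *data)
{
	/* Per-period completion handling (advancing the hw pointer) would go here. */
}

static int snd_imx_pcm_dma_alloc(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
	int ret;

	/* Claim a channel; a negative return value means none was available. */
	iprtd->dma = imx_dma_request_by_prio("imx-pcm", DMA_PRIO_HIGH);
	if (iprtd->dma < 0)
		return -ENODEV;

	/* Wire up the completion and error handlers used by the code above. */
	ret = imx_dma_setup_handlers(iprtd->dma, snd_imx_pcm_dma_irq,
				     snd_imx_dma_err_callback, substream);
	if (ret) {
		imx_dma_free(iprtd->dma);
		return ret;
	}

	return 0;
}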
static void mx27_camera_dma_enable(struct mx2_camera_dev *pcdev)
{
	u32 tmp;

	imx_dma_enable(pcdev->dma);

	tmp = readl(pcdev->base_csi + CSICR1);
	tmp |= CSICR1_RF_OR_INTEN;
	writel(tmp, pcdev->base_csi + CSICR1);
}
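/*
 * For context: a minimal sketch of the symmetric disable path, assuming the
 * driver masks the RX-FIFO overrun interrupt and stops the channel in the
 * reverse order of mx27_camera_dma_enable() above. The function name is
 * hypothetical; the actual driver's stop path may do more work.
 */
static void mx27_camera_dma_disable(struct mx2_camera_dev *pcdev)
{
	u32 tmp;

	/* Mask the RX FIFO overrun interrupt first, then stop the DMA channel. */
	tmp = readl(pcdev->base_csi + CSICR1);
	tmp &= ~CSICR1_RF_OR_INTEN;
	writel(tmp, pcdev->base_csi + CSICR1);

	imx_dma_disable(pcdev->dma);
}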
static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = host->data;
	u32 a, b, c;
	int i;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	if (stat & STATUS_TIME_OUT_RESP) {
		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
		cmd->error = MMC_ERR_TIMEOUT;
	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
		cmd->error = MMC_ERR_BADCRC;
	}

	/* Pull the command response out of the 16-bit response FIFO. */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			for (i = 0; i < 4; i++) {
				a = MMC_RES_FIFO & 0xffff;
				b = MMC_RES_FIFO & 0xffff;
				cmd->resp[i] = a << 16 | b;
			}
		} else {
			a = MMC_RES_FIFO & 0xffff;
			b = MMC_RES_FIFO & 0xffff;
			c = MMC_RES_FIFO & 0xffff;
			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
		}
	}

	dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
		cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);

	if (data && (cmd->error == MMC_ERR_NONE) && !(stat & STATUS_ERR_MASK)) {
		if (host->req->data->flags & MMC_DATA_WRITE) {

			/* Wait for FIFO to be empty before starting DMA write */
			stat = MMC_STATUS;
			if (imxmci_busy_wait_for_status(host, &stat,
							STATUS_APPL_BUFF_FE,
							40, "imxmci_cmd_done DMA WR") < 0) {
				cmd->error = MMC_ERR_FIFO;
				imxmci_finish_data(host, stat);
				if (host->req)
					imxmci_finish_request(host, host->req);
				dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n", stat);
				return 0;
			}

			/* The write DMA was armed in imxmci_setup_data(); start it now. */
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				imx_dma_enable(host->dma);
		}
	} else {
		struct mmc_request *req;

		imxmci_stop_clock(host);
		req = host->req;

		if (data)
			imxmci_finish_data(host, stat);

		if (req)
			imxmci_finish_request(host, req);
		else
			dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
	}

	return 1;
}
static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int blksz = data->blksz;
	unsigned int datasz = nob * blksz;
	int i;

	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	host->data = data;
	data->bytes_xfered = 0;

	MMC_NOB = nob;
	MMC_BLK_LEN = blksz;

	/*
	 * DMA cannot be used for small block sizes; we have to fall back to
	 * CPU-driven transfers otherwise. We are in big trouble for
	 * non-512-byte transfers anyway, according to the note in paragraph
	 * 20.6.7 of the User Manual, but we need to be able to read the SCR
	 * at least. The situation is even more complex in reality: the SDHC
	 * is not able to handle partial FIFO fills and reads well, and the
	 * length has to be rounded up to a multiple of the burst size. This
	 * is required for the SCR read at least.
	 */
	if (datasz < 512) {
		host->dma_size = datasz;
		if (data->flags & MMC_DATA_READ) {
			host->dma_dir = DMA_FROM_DEVICE;

			/* Hack to enable reading the SCR */
			MMC_NOB = 1;
			MMC_BLK_LEN = 512;
		} else {
			host->dma_dir = DMA_TO_DEVICE;
		}

		/* Convert back to virtual address */
		host->data_ptr = (u16 *)(page_address(data->sg->page) + data->sg->offset);
		host->data_cnt = 0;

		clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
		set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

		return;
	}

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len, host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
				 host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_READ);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len, host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
				 host->res->start + MMC_BUFFER_ACCESS_OFS, DMA_MODE_WRITE);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
	}

#if 1	/* This code is there only for consistency checking and can be disabled in the future */
	host->dma_size = 0;
	for (i = 0; i < host->dma_nents; i++)
		host->dma_size += data->sg[i].length;

	if (datasz > host->dma_size) {
		dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dma_size\n",
			datasz, host->dma_size);
	}
#endif

	host->dma_size = datasz;

	wmb();

	if (host->actual_bus_width == MMC_BUS_WIDTH_4)
		BLR(host->dma) = 0;	/* burst 64 byte read / 64 bytes write */
	else
		BLR(host->dma) = 16;	/* burst 16 byte read / 16 bytes write */

	RSSR(host->dma) = DMA_REQ_SDHC;

	set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
	clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

	/* start the DMA engine for a read; a write is delayed until after the initial response */
	if (host->dma_dir == DMA_FROM_DEVICE)
		imx_dma_enable(host->dma);
}
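/*
 * For context: a minimal sketch of the teardown counterpart to
 * imxmci_setup_data() above, assuming the driver disables the channel and
 * unmaps the scatterlist once the DMA data phase has completed. The function
 * name is hypothetical and this is illustrative only, not the driver's actual
 * imxmci_finish_data() implementation; it uses only symbols that appear in
 * the snippets above.
 */
static void imxmci_dma_teardown(struct imxmci_host *host, struct mmc_data *data)
{
	/* Only the DMA path maps the scatterlist; the CPU-driven path does not. */
	if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
		imx_dma_disable(host->dma);
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
			     host->dma_dir);
	}
}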