static int omap_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct omap_runtime_data *prtd = runtime->private_data;
	struct omap_pcm_dma_data *dma_data = prtd->dma_data;
	struct omap_dma_channel_params dma_params;
	int bytes;

	/* return if this is a bufferless transfer e.g.
	 * codec <--> BT codec or GSM modem -- lg FIXME */
	if (!prtd->dma_data)
		return 0;

	memset(&dma_params, 0, sizeof(dma_params));
	dma_params.data_type = dma_data->data_type;
	dma_params.trigger = dma_data->dma_req;
	dma_params.sync_mode = dma_data->sync_mode;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
		dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
		dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
		dma_params.src_start = runtime->dma_addr;
		dma_params.dst_start = dma_data->port_addr;
		dma_params.dst_port = OMAP_DMA_PORT_MPUI;
		dma_params.dst_fi = dma_data->packet_size;
	} else {
		dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
		dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
		dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
		dma_params.src_start = dma_data->port_addr;
		dma_params.dst_start = runtime->dma_addr;
		dma_params.src_port = OMAP_DMA_PORT_MPUI;
		dma_params.src_fi = dma_data->packet_size;
	}
	/*
	 * Set DMA transfer frame size equal to ALSA period size and frame
	 * count as no. of ALSA periods. Then with DMA frame interrupt enabled,
	 * we can transfer the whole ALSA buffer with a single DMA transfer but
	 * still get an interrupt at each period boundary.
	 */
	bytes = snd_pcm_lib_period_bytes(substream);
	/* data_type doubles as log2 of the element size (S8=0, S16=1, S32=2) */
	dma_params.elem_count = bytes >> dma_data->data_type;
	dma_params.frame_count = runtime->periods;
	omap_set_dma_params(prtd->dma_ch, &dma_params);

	if (cpu_is_omap1510())
		omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ |
				    OMAP_DMA_LAST_IRQ | OMAP_DMA_BLOCK_IRQ);
	else
		omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ);

	if (!cpu_class_is_omap1()) {
		omap_set_dma_src_burst_mode(prtd->dma_ch,
					    OMAP_DMA_DATA_BURST_16);
		omap_set_dma_dest_burst_mode(prtd->dma_ch,
					     OMAP_DMA_DATA_BURST_16);
	}

	return 0;
}
static int omap_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct omap_runtime_data *prtd = runtime->private_data;
	struct omap_pcm_dma_data *dma_data = prtd->dma_data;
	struct omap_dma_channel_params dma_params;
	int sync_mode;

	memset(&dma_params, 0, sizeof(dma_params));

	/* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
	if (cpu_is_omap34xx() &&
	    (prtd->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD))
		sync_mode = OMAP_DMA_SYNC_FRAME;
	else
		sync_mode = OMAP_DMA_SYNC_ELEMENT;

	/*
	 * Note: Regardless of the interface data formats supported by the
	 * OMAP McBSP or EAC blocks, the internal representation is always a
	 * fixed 16 bits per sample.
	 */
	dma_params.data_type = OMAP_DMA_DATA_TYPE_S16;
	dma_params.trigger = dma_data->dma_req;
	dma_params.sync_mode = sync_mode;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
		dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
		dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
		dma_params.src_start = runtime->dma_addr;
		dma_params.dst_start = dma_data->port_addr;
		dma_params.dst_port = OMAP_DMA_PORT_MPUI;
	} else {
		dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
		dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
		dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
		dma_params.src_start = dma_data->port_addr;
		dma_params.dst_start = runtime->dma_addr;
		dma_params.src_port = OMAP_DMA_PORT_MPUI;
	}
	/*
	 * Set DMA transfer frame size equal to ALSA period size and frame
	 * count as no. of ALSA periods. Then with DMA frame interrupt enabled,
	 * we can transfer the whole ALSA buffer with a single DMA transfer but
	 * still get an interrupt at each period boundary.
	 */
	dma_params.elem_count = snd_pcm_lib_period_bytes(substream) / 2;
	dma_params.frame_count = runtime->periods;
	omap_set_dma_params(prtd->dma_ch, &dma_params);

	omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ);

	omap_set_dma_src_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16);
	omap_set_dma_dest_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16);

	return 0;
}
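/*
 * Worked example of the elem_count/frame_count arithmetic used by both
 * omap_pcm_prepare() variants above, with hypothetical ALSA parameters
 * (a 1024-byte period and 4 periods per buffer); the variable names
 * mirror the driver's but this function is illustrative only.
 */
static void omap_pcm_period_math_example(void)
{
	size_t period_bytes = 1024;	/* snd_pcm_lib_period_bytes() */
	unsigned int periods = 4;	/* runtime->periods */

	int elem_count = period_bytes / 2;	/* 512 16-bit elements per DMA frame */
	int frame_count = periods;		/* 4 DMA frames cover the whole buffer */

	/*
	 * One DMA transfer then moves frame_count * period_bytes = 4096
	 * bytes, i.e. the entire ALSA buffer, while OMAP_DMA_FRAME_IRQ
	 * fires once per 1024-byte period.
	 */
	(void)elem_count;
	(void)frame_count;
}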
static int abe_dbg_start_dma(struct omap_abe *abe, int circular)
{
	struct omap_dma_channel_params dma_params;
	int err;

	/* Start the DMA in either:
	 *
	 * 1) circular buffer mode, where the DMA restarts when it gets to
	 *    the end of the buffer, or
	 * 2) default mode, where the DMA stops at the end of the buffer.
	 */

	abe->debugfs.dma_req = OMAP44XX_DMA_ABE_REQ_7;
	err = omap_request_dma(abe->debugfs.dma_req, "ABE debug",
			       abe_dbg_dma_irq, abe, &abe->debugfs.dma_ch);
	if (err)
		return err;

	if (abe->debugfs.circular) {
		/*
		 * Link the channel with itself so the DMA doesn't need any
		 * reprogramming while looping the buffer
		 */
		omap_dma_link_lch(abe->debugfs.dma_ch, abe->debugfs.dma_ch);
	}

	memset(&dma_params, 0, sizeof(dma_params));

	dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
	dma_params.trigger = abe->debugfs.dma_req;
	dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
	dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
	dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
	dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
	dma_params.src_start = OMAP_ABE_D_DEBUG_FIFO_ADDR +
			ABE_DEFAULT_BASE_ADDRESS_L3 + ABE_DMEM_BASE_OFFSET_MPU;
	dma_params.dst_start = abe->debugfs.buffer_addr;
	dma_params.src_port = OMAP_DMA_PORT_MPUI;
	dma_params.src_ei = 1;
	dma_params.src_fi = 1 - abe->debugfs.elem_bytes;

	/* 128 bytes shifted into words */
	dma_params.elem_count = abe->debugfs.elem_bytes >> 2;
	dma_params.frame_count = abe->debugfs.buffer_bytes /
			abe->debugfs.elem_bytes;
	omap_set_dma_params(abe->debugfs.dma_ch, &dma_params);

	omap_enable_dma_irq(abe->debugfs.dma_ch, OMAP_DMA_FRAME_IRQ);
	omap_set_dma_src_burst_mode(abe->debugfs.dma_ch,
				    OMAP_DMA_DATA_BURST_16);
	omap_set_dma_dest_burst_mode(abe->debugfs.dma_ch,
				     OMAP_DMA_DATA_BURST_16);

	abe->debugfs.reader_offset = 0;

	pm_runtime_get_sync(abe->dev);
	omap_start_dma(abe->debugfs.dma_ch);
	return 0;
}
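/*
 * Sketch of the double-indexed source addressing used above, assuming the
 * OMAP sDMA indexing convention that the next address is computed as
 * current + element size + (index - 1), so an index value of 1 means
 * contiguous. The concrete numbers (128-byte FIFO window, 32-bit elements)
 * come from the driver's own comment; the function is illustrative only.
 */
static void abe_dbg_src_index_example(void)
{
	unsigned int elem_bytes = 128;	/* abe->debugfs.elem_bytes */
	int src_ei = 1;			/* contiguous within a frame */
	int src_fi = 1 - elem_bytes;	/* -127: rewind at each frame
					 * boundary so every frame re-reads
					 * the same FIFO window */

	/*
	 * With 32 words per frame, the last element sits at start + 124;
	 * the next frame then begins at
	 * start + 124 + 4 + (src_fi - 1) = start.
	 */
	(void)src_ei;
	(void)src_fi;
}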
static void omap1610_irda_start_rx_dma(struct omap1610_irda *si)
{
	/* Configure DMA: constant source address (UART3 RHR, amode 0x0),
	 * post-incrementing destination (amode 0x1) into the RX DMA buffer;
	 * one frame of 4096 byte-sized elements. */
	omap_set_dma_src_params(si->rx_dma_channel, 0x3, 0x0,
				(unsigned long)UART3_RHR);
	omap_enable_dma_irq(si->rx_dma_channel, 0x01);
	omap_set_dma_dest_params(si->rx_dma_channel, 0x0, 0x1,
				 si->rx_buf_dma_phys);
	omap_set_dma_transfer_params(si->rx_dma_channel, 0x0, 4096, 0x1, 0x0);
	omap_start_dma(si->rx_dma_channel);
}
static void omap_start_tx_dma(struct omap_irda *omap_ir, int size)
{
	/* Configure DMA */
	omap_set_dma_dest_params(omap_ir->tx_dma_channel, 0x03, 0x0,
				 omap_ir->pdata->dest_start, 0, 0);
	omap_enable_dma_irq(omap_ir->tx_dma_channel, 0x01);
	omap_set_dma_src_params(omap_ir->tx_dma_channel, 0x0, 0x1,
				omap_ir->tx_buf_dma_phys, 0, 0);
	omap_set_dma_transfer_params(omap_ir->tx_dma_channel, 0x0, size,
				     0x1, 0x0, omap_ir->pdata->tx_trigger, 0);

	/* Start DMA */
	omap_start_dma(omap_ir->tx_dma_channel);
}
static void omap_irda_start_rx_dma(struct omap_irda *omap_ir)
{
	/* Configure DMA */
	omap_set_dma_src_params(omap_ir->rx_dma_channel, 0x3, 0x0,
				omap_ir->pdata->src_start, 0, 0);
	omap_enable_dma_irq(omap_ir->rx_dma_channel, 0x01);
	omap_set_dma_dest_params(omap_ir->rx_dma_channel, 0x0, 0x1,
				 omap_ir->rx_buf_dma_phys, 0, 0);
	omap_set_dma_transfer_params(omap_ir->rx_dma_channel, 0x0,
				     IRDA_SKB_MAX_MTU, 0x1, 0x0,
				     omap_ir->pdata->rx_trigger, 0);
	omap_start_dma(omap_ir->rx_dma_channel);
}
int omap3isp_hist_init(struct isp_device *isp)
{
	struct ispstat *hist = &isp->isp_hist;
	struct omap3isp_hist_config *hist_cfg;
	int ret = -1;

	hist_cfg = kzalloc(sizeof(*hist_cfg), GFP_KERNEL);
	if (hist_cfg == NULL)
		return -ENOMEM;

	memset(hist, 0, sizeof(*hist));
	if (HIST_CONFIG_DMA)
		ret = omap_request_dma(OMAP24XX_DMA_NO_DEVICE, "DMA_ISP_HIST",
				       hist_dma_cb, hist, &hist->dma_ch);
	if (ret) {
		if (HIST_CONFIG_DMA)
			dev_warn(isp->dev, "hist: DMA request channel failed. "
					   "Using PIO only.\n");
		hist->dma_ch = -1;
	} else {
		dev_dbg(isp->dev, "hist: DMA channel = %d\n", hist->dma_ch);
		hist_dma_config(hist);
		omap_enable_dma_irq(hist->dma_ch, OMAP_DMA_BLOCK_IRQ);
	}

	hist->ops = &hist_ops;
	hist->priv = hist_cfg;
	hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;
	hist->isp = isp;

	ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
	if (ret) {
		kfree(hist_cfg);
		if (HIST_USING_DMA(hist))
			omap_free_dma(hist->dma_ch);
	}

	return ret;
}
static void omap1610_start_tx_dma(struct omap1610_irda *si, int size)
{
	__ECHO_IN;

	/* Configure DMA */
	omap_set_dma_dest_params(si->tx_dma_channel, 0x03, 0x0,
				 (unsigned long)UART3_THR);
	omap_enable_dma_irq(si->tx_dma_channel, 0x01);
	omap_set_dma_src_params(si->tx_dma_channel, 0x0, 0x1,
				si->tx_buf_dma_phys);
	omap_set_dma_transfer_params(si->tx_dma_channel, 0x0, size, 0x1, 0x0);

	HDBG1(1);

	/* Start DMA */
	omap_start_dma(si->tx_dma_channel);

	HDBG1(1);

	__ECHO_OUT;
}
static int aes_dma_start(struct aes_hwa_ctx *ctx)
{
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;
	struct omap_dma_channel_params dma_params;
	struct tf_crypto_aes_operation_state *state =
		crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req));
	static size_t last_count;
	unsigned long flags;

	in = IS_ALIGNED((u32)ctx->in_sg->offset, sizeof(u32));
	out = IS_ALIGNED((u32)ctx->out_sg->offset, sizeof(u32));

	fast = in && out;

	if (fast) {
		count = min(ctx->total, sg_dma_len(ctx->in_sg));
		count = min(count, sg_dma_len(ctx->out_sg));

		if (count != ctx->total)
			return -EINVAL;

		/* Only call dma_map_sg if it has not yet been done */
		if (!(ctx->req->base.flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) {
			err = dma_map_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
			if (!err)
				return -EINVAL;

			err = dma_map_sg(NULL, ctx->out_sg, 1,
					 DMA_FROM_DEVICE);
			if (!err) {
				dma_unmap_sg(NULL, ctx->in_sg, 1,
					     DMA_TO_DEVICE);
				return -EINVAL;
			}
		}
		ctx->req->base.flags &= ~CRYPTO_TFM_REQ_DMA_VISIBLE;

		addr_in = sg_dma_address(ctx->in_sg);
		addr_out = sg_dma_address(ctx->out_sg);

		ctx->flags |= FLAGS_FAST;
	} else {
		count = sg_copy(&ctx->in_sg, &ctx->in_offset, ctx->buf_in,
				ctx->buflen, ctx->total, 0);
		addr_in = ctx->dma_addr_in;
		addr_out = ctx->dma_addr_out;

		ctx->flags &= ~FLAGS_FAST;
	}

	ctx->total -= count;

	/* Configure HWA */
	tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);

	tf_aes_restore_registers(state, ctx->flags & FLAGS_ENCRYPT ? 1 : 0);

	OUTREG32(&paes_reg->AES_SYSCONFIG, INREG32(&paes_reg->AES_SYSCONFIG)
		| AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
		| AES_SYSCONFIG_DMA_REQ_IN_EN_BIT);

	ctx->dma_size = count;
	if (!fast)
		dma_sync_single_for_device(NULL, addr_in, count,
					   DMA_TO_DEVICE);

	dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
	dma_params.frame_count = count / AES_BLOCK_SIZE;
	dma_params.elem_count = DMA_CEN_Elts_per_Frame_AES;
	dma_params.src_ei = 0;
	dma_params.src_fi = 0;
	dma_params.dst_ei = 0;
	dma_params.dst_fi = 0;
	dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
	dma_params.read_prio = 0;
	dma_params.write_prio = 0;

	/* IN */
	dma_params.trigger = ctx->dma_in;
	dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
	dma_params.dst_start = AES1_REGS_HW_ADDR + 0x60;
	dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
	dma_params.src_start = addr_in;
	dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;

	if (reconfigure_dma) {
		omap_set_dma_params(ctx->dma_lch_in, &dma_params);
		omap_set_dma_dest_burst_mode(ctx->dma_lch_in,
					     OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_burst_mode(ctx->dma_lch_in,
					    OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_data_pack(ctx->dma_lch_in, 1);
	} else {
		if (last_count != count)
			omap_set_dma_transfer_params(ctx->dma_lch_in,
				dma_params.data_type,
				dma_params.elem_count, dma_params.frame_count,
				dma_params.sync_mode, dma_params.trigger,
				dma_params.src_or_dst_synch);

		/* Configure input start address */
		__raw_writel(dma_params.src_start,
			omap_dma_base + (0x60 * (ctx->dma_lch_in) + 0x9c));
	}

	/* OUT */
	dma_params.trigger = ctx->dma_out;
	dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
	dma_params.src_start = AES1_REGS_HW_ADDR + 0x60;
	dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
	dma_params.dst_start = addr_out;
	dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;

	if (reconfigure_dma) {
		omap_set_dma_params(ctx->dma_lch_out, &dma_params);
		omap_set_dma_dest_burst_mode(ctx->dma_lch_out,
					     OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_burst_mode(ctx->dma_lch_out,
					    OMAP_DMA_DATA_BURST_8);
		omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1);
		reconfigure_dma = false;
	} else {
		if (last_count != count) {
			omap_set_dma_transfer_params(ctx->dma_lch_out,
				dma_params.data_type,
				dma_params.elem_count, dma_params.frame_count,
				dma_params.sync_mode, dma_params.trigger,
				dma_params.src_or_dst_synch);
			last_count = count;
		}
		/* Configure output start address */
		__raw_writel(dma_params.dst_start,
			omap_dma_base + (0x60 * (ctx->dma_lch_out) + 0xa0));
	}

	/* Is this really needed? */
	omap_enable_dma_irq(ctx->dma_lch_in, OMAP_DMA_BLOCK_IRQ);
	omap_enable_dma_irq(ctx->dma_lch_out, OMAP_DMA_BLOCK_IRQ);

	wmb();

	omap_start_dma(ctx->dma_lch_in);
	omap_start_dma(ctx->dma_lch_out);

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx->next_req) {
		struct ablkcipher_request *req =
			ablkcipher_request_cast(ctx->next_req);

		if (!(ctx->next_req->flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) {
			err = dma_map_sg(NULL, req->src, 1, DMA_TO_DEVICE);
			if (!err) {
				/* Silently fail for now... */
				spin_unlock_irqrestore(&ctx->lock, flags);
				return 0;
			}

			err = dma_map_sg(NULL, req->dst, 1, DMA_FROM_DEVICE);
			if (!err) {
				dma_unmap_sg(NULL, req->src, 1, DMA_TO_DEVICE);
				/* Silently fail for now... */
				spin_unlock_irqrestore(&ctx->lock, flags);
				return 0;
			}

			ctx->next_req->flags |= CRYPTO_TFM_REQ_DMA_VISIBLE;
			ctx->next_req = NULL;
		}
	}

	if (ctx->backlog) {
		ctx->backlog->complete(ctx->backlog, -EINPROGRESS);
		ctx->backlog = NULL;
	}
	spin_unlock_irqrestore(&ctx->lock, flags);

	return 0;
}
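/*
 * The __raw_writel() calls in aes_dma_start() poke the per-channel
 * source/destination start-address registers directly instead of going
 * through omap_set_dma_params(). Hypothetical helpers making that offset
 * arithmetic explicit; the 0x60 channel stride and the 0x9c/0xa0
 * (CSSA/CDSA) offsets are assumed from the OMAP4 sDMA register layout,
 * not taken from this driver.
 */
#define DMA4_CH_STRIDE	0x60	/* bytes between per-channel register banks */
#define DMA4_CSSA	0x9c	/* channel source start address */
#define DMA4_CDSA	0xa0	/* channel destination start address */

static inline void dma4_set_src_start(void __iomem *base, int lch, u32 addr)
{
	__raw_writel(addr, base + DMA4_CH_STRIDE * lch + DMA4_CSSA);
}

static inline void dma4_set_dst_start(void __iomem *base, int lch, u32 addr)
{
	__raw_writel(addr, base + DMA4_CH_STRIDE * lch + DMA4_CDSA);
}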