/*
 * Kick the DMA chain for an audio stream.
 *
 * On first start the McBSP interface is cycled around the DMA start so
 * the serial port and the DMA engine come up in sync.  On OMAP310 every
 * queued transfer must be started by hand; other parts progress the
 * chain on their own.
 */
static int audio_start_dma_chain(struct audio_stream *s)
{
	int lch = s->lch[s->dma_q_head];

	FN_IN;
	if (!s->started) {
		s->hw_stop();		/* stops McBSP Interface */
		omap_start_dma(lch);
		s->started = 1;
		s->hw_start();		/* start McBSP interface */
	} else if (cpu_is_omap310()) {
		omap_start_dma(lch);
	}
	/* else the dma itself will progress forward with out our help */
	FN_OUT(0);
	return 0;
}
/*
 * Program and start the next TX DMA transfer from the port's circular
 * transmit buffer.  Only the contiguous chunk up to the end of the
 * buffer is sent; the completion callback re-invokes this to send the
 * wrapped-around remainder.
 */
static void serial_omap_continue_tx(struct uart_omap_port *up)
{
	struct circ_buf *xmit = &up->port.state->xmit;
	unsigned int start = up->uart_dma.tx_buf_dma_phys +
			(xmit->tail & (UART_XMIT_SIZE - 1));

	if (uart_circ_empty(xmit))
		return;

	up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
	/*
	 * It is a circular buffer. See if the buffer has wounded back.
	 * If yes it will have to be transferred in two separate dma
	 * transfers
	 */
	if (start + up->uart_dma.tx_buf_size >=
			up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
		up->uart_dma.tx_buf_size =
			(up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE) - start;

	/* Destination: UART data register at a fixed address */
	omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				up->uart_dma.uart_base, 0, 0);
	/* Source: walk forward through the xmit buffer */
	omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
				OMAP_DMA_AMODE_POST_INC, start, 0, 0);
	omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
				OMAP_DMA_DATA_TYPE_S8,
				up->uart_dma.tx_buf_size, 1,
				OMAP_DMA_SYNC_ELEMENT,
				up->uart_dma.uart_dma_tx, 0);
	/* FIXME: Cache maintenance needed here? */
	omap_start_dma(up->uart_dma.tx_dma_channel);
}
static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host, struct mmc_data *data, struct scatterlist *sgl) { int blksz, nblk, dma_ch; dma_ch = host->dma_ch; if (data->flags & MMC_DATA_WRITE) { omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, (host->mapbase + OMAP_HSMMC_DATA), 0, 0); omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, sg_dma_address(sgl), 0, 0); } else { omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, (host->mapbase + OMAP_HSMMC_DATA), 0, 0); omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, sg_dma_address(sgl), 0, 0); } blksz = host->data->blksz; nblk = sg_dma_len(sgl) / blksz; omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, blksz / 4, nblk, OMAP_DMA_SYNC_FRAME, omap_hsmmc_get_dma_sync_dev(host, data), !(data->flags & MMC_DATA_WRITE)); omap_start_dma(dma_ch); }
/*
 * (Re)start RX DMA for the port.  The channel is requested and
 * programmed once, lazily, on first use; later calls only restart the
 * transfer and re-arm the polling timer.
 * Returns 0 on success or the omap_request_dma() error code.
 */
static int serial_omap_start_rxdma(struct uart_omap_port *up)
{
	int ret = 0;

	if (up->uart_dma.rx_dma_channel == -1) {
		pm_runtime_get_sync(&up->pdev->dev);
		ret = omap_request_dma(up->uart_dma.uart_dma_rx,
				"UART Rx DMA",
				(void *)uart_rx_dma_callback, up,
				&(up->uart_dma.rx_dma_channel));
		if (ret < 0)
			return ret;

		/* Source: UART data register at a fixed address */
		omap_set_dma_src_params(up->uart_dma.rx_dma_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				up->uart_dma.uart_base, 0, 0);
		/* Destination: the driver's receive DMA buffer */
		omap_set_dma_dest_params(up->uart_dma.rx_dma_channel, 0,
				OMAP_DMA_AMODE_POST_INC,
				up->uart_dma.rx_buf_dma_phys, 0, 0);
		omap_set_dma_transfer_params(up->uart_dma.rx_dma_channel,
				OMAP_DMA_DATA_TYPE_S8,
				up->uart_dma.rx_buf_size, 1,
				OMAP_DMA_SYNC_ELEMENT,
				up->uart_dma.uart_dma_rx, 0);
	}
	up->uart_dma.prev_rx_dma_pos = up->uart_dma.rx_buf_dma_phys;
	omap_start_dma(up->uart_dma.rx_dma_channel);
	/* Poll the DMA position periodically to push data to the tty */
	mod_timer(&up->uart_dma.rx_timer, jiffies +
			usecs_to_jiffies(up->uart_dma.rx_poll_rate));
	up->uart_dma.rx_dma_used = true;
	return ret;
}
static void serial_omap_continue_tx(struct uart_omap_port *up) { struct circ_buf *xmit = &up->port.state->xmit; unsigned int start = up->uart_dma.tx_buf_dma_phys + (xmit->tail & (UART_XMIT_SIZE - 1)); if (uart_circ_empty(xmit)) return; up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit); if (start + up->uart_dma.tx_buf_size >= up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE) up->uart_dma.tx_buf_size = (up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE) - start; omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0, OMAP_DMA_AMODE_CONSTANT, up->uart_dma.uart_base, 0, 0); omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0, OMAP_DMA_AMODE_POST_INC, start, 0, 0); omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel, OMAP_DMA_DATA_TYPE_S8, up->uart_dma.tx_buf_size, 1, OMAP_DMA_SYNC_ELEMENT, up->uart_dma.uart_dma_tx, 0); omap_start_dma(up->uart_dma.tx_dma_channel); }
/*
 * A scatterlist segment completed.  DMA completion callback: account
 * the transferred bytes and either start the next segment or finish
 * the data phase.
 */
static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
	struct mmc_data *mmcdat = host->data;

	/* Late/spurious callback: DMA was already torn down */
	if (unlikely(host->dma_ch < 0)) {
		dev_err(mmc_dev(host->mmc),
			"DMA callback while DMA not enabled\n");
		return;
	}
	/* FIXME: We really should do something to _handle_ the errors */
	if (ch_status & OMAP1_DMA_TOUT_IRQ) {
		dev_err(mmc_dev(host->mmc),"DMA timeout\n");
		return;
	}
	if (ch_status & OMAP_DMA_DROP_IRQ) {
		dev_err(mmc_dev(host->mmc), "DMA sync error\n");
		return;
	}
	/* Only block-complete interrupts advance the transfer */
	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
		return;
	}
	mmcdat->bytes_xfered += host->dma_len;
	host->sg_idx++;
	if (host->sg_idx < host->sg_len) {
		/* More segments: program and start the next one */
		mmc_omap_prepare_dma(host, host->data);
		omap_start_dma(host->dma_ch);
	} else
		mmc_omap_dma_done(host, host->data);
}
static int omap_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_pcm_runtime *runtime = substream->runtime; struct omap_runtime_data *prtd = runtime->private_data; unsigned long flags; int ret = 0; spin_lock_irqsave(&prtd->lock, flags); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: prtd->period_index = 0; omap_start_dma(prtd->dma_ch); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: prtd->period_index = -1; omap_stop_dma(prtd->dma_ch); break; default: ret = -EINVAL; } spin_unlock_irqrestore(&prtd->lock, flags); return ret; }
/*
 * Start transmission on the port.  In PIO mode simply enable the THR
 * interrupt.  In DMA mode lazily request the TX channel on first use,
 * mark it busy, then program and start a transfer covering the
 * contiguous run of the circular xmit buffer.
 */
static void serial_omap_start_tx(struct uart_port *port)
{
	struct uart_omap_port *up = (struct uart_omap_port *)port;
	struct circ_buf *xmit;
	unsigned int start;
	int ret = 0;

	if (!up->use_dma) {
		serial_omap_enable_ier_thri(up);
		return;
	}

	/* A DMA transfer is already in flight; its callback continues it */
	if (up->uart_dma.tx_dma_used)
		return;

	xmit = &up->port.state->xmit;

	if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) {
		ret = omap_request_dma(up->uart_dma.uart_dma_tx,
				"UART Tx DMA",
				(void *)uart_tx_dma_callback, up,
				&(up->uart_dma.tx_dma_channel));
		if (ret < 0) {
			/* No channel available: fall back to IRQ-driven TX */
			serial_omap_enable_ier_thri(up);
			return;
		}
	}
	spin_lock(&(up->uart_dma.tx_lock));
	up->uart_dma.tx_dma_used = true;
	spin_unlock(&(up->uart_dma.tx_lock));

	start = up->uart_dma.tx_buf_dma_phys +
			(xmit->tail & (UART_XMIT_SIZE - 1));

	up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
	/*
	 * It is a circular buffer. See if the buffer has wounded back.
	 * If yes it will have to be transferred in two separate dma
	 * transfers
	 */
	if (start + up->uart_dma.tx_buf_size >=
			up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
		up->uart_dma.tx_buf_size =
			(up->uart_dma.tx_buf_dma_phys +
			 UART_XMIT_SIZE) - start;

	/* Destination: UART data register at a fixed address */
	omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				up->uart_dma.uart_base, 0, 0);
	/* Source: walk forward through the xmit buffer */
	omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
				OMAP_DMA_AMODE_POST_INC, start, 0, 0);
	omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
				OMAP_DMA_DATA_TYPE_S8,
				up->uart_dma.tx_buf_size, 1,
				OMAP_DMA_SYNC_ELEMENT,
				up->uart_dma.uart_dma_tx, 0);
	/* FIXME: Cache maintenance needed here? */
	omap_start_dma(up->uart_dma.tx_dma_channel);
}
/*
 * Older (pre-.state) variant: program and start the next TX DMA chunk
 * from the circular xmit buffer.  The register base and DMA request
 * line are derived from the platform-device id.
 */
static void serial_omap_continue_tx(struct uart_omap_port *up)
{
	struct circ_buf *xmit = &up->port.info->xmit;
	/* Physical address of the current tail inside the DMA buffer */
	int start = up->uart_dma.tx_buf_dma_phys +
			(xmit->tail & (UART_XMIT_SIZE - 1));

	if (uart_circ_empty(xmit)) {
		return;
	}

	up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
	/* It is a circular buffer. See if the buffer has wounded back.
	 * If yes it will have to be transferred in two separate dma
	 * transfers */
	if (start + up->uart_dma.tx_buf_size >=
			up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
		up->uart_dma.tx_buf_size =
			(up->uart_dma.tx_buf_dma_phys +
			 UART_XMIT_SIZE) - start;

	/* Destination: UART data register at a fixed address */
	omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
			OMAP_DMA_AMODE_CONSTANT,
			UART_BASE(up->pdev->id - 1), 0, 0);
	omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
			OMAP_DMA_AMODE_POST_INC, start, 0, 0);
	omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
			OMAP_DMA_DATA_TYPE_S8,
			up->uart_dma.tx_buf_size, 1,
			OMAP_DMA_SYNC_ELEMENT,
			uart_dma_tx[(up->pdev->id)-1], 0);
	omap_start_dma(up->uart_dma.tx_dma_channel);
}
/*
 * Older variant of RX DMA start: lazily request and program the
 * channel (0xFF means "not yet requested"), clear the channel's
 * destination counter, start the transfer and arm the polling timer.
 */
static void serial_omap_start_rxdma(struct uart_omap_port *up)
{
#ifdef CONFIG_OMAP3_PM
	/* Disallow OCP bus idle. UART TX irqs are not seen during
	 * bus idle. Alternative is to set kernel timer at fifo
	 * drain rate.
	 */
	unsigned int tmp;
	tmp = (serial_in(up, UART_OMAP_SYSC) & 0x7) | (1 << 3);
	serial_out(up, UART_OMAP_SYSC, tmp); /* no-idle */
#endif
	if (up->uart_dma.rx_dma_channel == 0xFF) {
		/* NOTE(review): the omap_request_dma() result is not
		 * checked; a failure leaves rx_dma_channel invalid. */
		omap_request_dma(uart_dma_rx[up->pdev->id-1], "UART Rx DMA",
				(void *)uart_rx_dma_callback, up,
				&(up->uart_dma.rx_dma_channel));
		/* Source: UART data register at a fixed address */
		omap_set_dma_src_params(up->uart_dma.rx_dma_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				UART_BASE(up->pdev->id - 1), 0, 0);
		omap_set_dma_dest_params(up->uart_dma.rx_dma_channel, 0,
				OMAP_DMA_AMODE_POST_INC,
				up->uart_dma.rx_buf_dma_phys, 0, 0);
		omap_set_dma_transfer_params(up->uart_dma.rx_dma_channel,
				OMAP_DMA_DATA_TYPE_S8,
				up->uart_dma.rx_buf_size, 1,
				OMAP_DMA_SYNC_ELEMENT,
				uart_dma_rx[up->pdev->id-1], 0);
	}
	up->uart_dma.prev_rx_dma_pos = up->uart_dma.rx_buf_dma_phys;
	/* Zero the channel's destination address counter register */
	omap_writel(0, OMAP34XX_DMA4_BASE
			+ OMAP_DMA4_CDAC(up->uart_dma.rx_dma_channel));
	omap_start_dma(up->uart_dma.rx_dma_channel);
	mod_timer(&up->uart_dma.rx_timer, jiffies +
			usecs_to_jiffies(up->uart_dma.rx_timeout));
	up->uart_dma.rx_dma_state = 1;
}
/*
 * ======== SCPY_start ========
 * Start an SDMA transfer (configured previously) on this handle.
 * Waits for any in-flight transfer on the channel to finish, marks
 * the channel busy again, then kicks the hardware.
 */
void SCPY_start(IRES_SDMA_Handle handle)
{
	int lch = handle->channel->chanNum;
	unsigned int addr = (unsigned int)(handle->channel->addr);

	/* Don't restart a channel that is still transferring */
	SCPY_wait(handle);

	handle->channel->transferState.transferCompleted = 0;
	omap_start_dma(lch, addr);
}
/*
 * Older variant of start_tx: either program a TX DMA over the
 * contiguous run of the circular xmit buffer, or (no DMA in use /
 * x_char pending) fall back to enabling the THR interrupt.
 */
static void serial_omap_start_tx(struct uart_port *port)
{
	struct uart_omap_port *up = (struct uart_omap_port *)port;
#ifdef CONFIG_OMAP3_PM
	/* Disallow OCP bus idle. UART TX irqs are not seen during
	 * bus idle. Alternative is to set kernel timer at fifo
	 * drain rate.
	 */
	unsigned int tmp;
	tmp = (serial_in(up, UART_OMAP_SYSC) & 0x7) | (1 << 3);
	serial_out(up, UART_OMAP_SYSC, tmp); /* no-idle */
#endif
	if (up->use_dma && !(up->port.x_char)) {
		struct circ_buf *xmit = &up->port.info->xmit;
		unsigned int start = up->uart_dma.tx_buf_dma_phys +
				(xmit->tail & (UART_XMIT_SIZE - 1));

		if (uart_circ_empty(xmit) || up->uart_dma.tx_dma_state)
			return;

		spin_lock(&(up->uart_dma.tx_lock));
		up->uart_dma.tx_dma_state = 1;
		spin_unlock(&(up->uart_dma.tx_lock));

		up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
		/* It is a circular buffer. See if the buffer has wounded back.
		 * If yes it will have to be transferred in two separate dma
		 * transfers */
		if (start + up->uart_dma.tx_buf_size >=
				up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
			up->uart_dma.tx_buf_size =
				(up->uart_dma.tx_buf_dma_phys +
				 UART_XMIT_SIZE) - start;

		/* Lazily request the channel (0xFF == not yet requested).
		 * NOTE(review): the omap_request_dma() result is unchecked. */
		if (up->uart_dma.tx_dma_channel == 0xFF)
			omap_request_dma(uart_dma_tx[up->pdev->id-1],
					"UART Tx DMA",
					(void *)uart_tx_dma_callback, up,
					&(up->uart_dma.tx_dma_channel));
		omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				UART_BASE(up->pdev->id - 1), 0, 0);
		omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
				OMAP_DMA_AMODE_POST_INC, start, 0, 0);
		omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
				OMAP_DMA_DATA_TYPE_S8,
				up->uart_dma.tx_buf_size, 1,
				OMAP_DMA_SYNC_ELEMENT,
				uart_dma_tx[(up->pdev->id)-1], 0);
		omap_start_dma(up->uart_dma.tx_dma_channel);
	} else if (!(up->ier & UART_IER_THRI)) {
		up->ier |= UART_IER_THRI;
		serial_out(up, UART_IER, up->ier);
	}
}
static int abe_dbg_start_dma(struct omap_abe *abe, int circular) { struct omap_dma_channel_params dma_params; int err; /* start the DMA in either :- * * 1) circular buffer mode where the DMA will restart when it get to * the end of the buffer. * 2) default mode, where DMA stops at the end of the buffer. */ abe->debugfs.dma_req = OMAP44XX_DMA_ABE_REQ_7; err = omap_request_dma(abe->debugfs.dma_req, "ABE debug", abe_dbg_dma_irq, abe, &abe->debugfs.dma_ch); if (abe->debugfs.circular) { /* * Link channel with itself so DMA doesn't need any * reprogramming while looping the buffer */ omap_dma_link_lch(abe->debugfs.dma_ch, abe->debugfs.dma_ch); } memset(&dma_params, 0, sizeof(dma_params)); dma_params.data_type = OMAP_DMA_DATA_TYPE_S32; dma_params.trigger = abe->debugfs.dma_req; dma_params.sync_mode = OMAP_DMA_SYNC_FRAME; dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX; dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC; dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC; dma_params.src_start = OMAP_ABE_D_DEBUG_FIFO_ADDR + ABE_DEFAULT_BASE_ADDRESS_L3 + ABE_DMEM_BASE_OFFSET_MPU; dma_params.dst_start = abe->debugfs.buffer_addr; dma_params.src_port = OMAP_DMA_PORT_MPUI; dma_params.src_ei = 1; dma_params.src_fi = 1 - abe->debugfs.elem_bytes; /* 128 bytes shifted into words */ dma_params.elem_count = abe->debugfs.elem_bytes >> 2; dma_params.frame_count = abe->debugfs.buffer_bytes / abe->debugfs.elem_bytes; omap_set_dma_params(abe->debugfs.dma_ch, &dma_params); omap_enable_dma_irq(abe->debugfs.dma_ch, OMAP_DMA_FRAME_IRQ); omap_set_dma_src_burst_mode(abe->debugfs.dma_ch, OMAP_DMA_DATA_BURST_16); omap_set_dma_dest_burst_mode(abe->debugfs.dma_ch, OMAP_DMA_DATA_BURST_16); abe->debugfs.reader_offset = 0; pm_runtime_get_sync(abe->dev); omap_start_dma(abe->debugfs.dma_ch); return 0; }
/*
 * Submit a new MMC request: prepare the data phase, issue the command,
 * and, when DMA is in use, start the transfer.
 */
static void
mmc_omap_start_request(struct mmc_omap_host *host, struct mmc_request *req)
{
	/* Only one request may be outstanding per host */
	BUG_ON(host->mrq != NULL);

	host->mrq = req;

	/* only touch fifo AFTER the controller readies it */
	mmc_omap_prepare_data(host, req);
	mmc_omap_start_command(host, req->cmd);
	if (host->dma_in_use)
		omap_start_dma(host->dma_ch);
}
/*
 * Program and start RX DMA for the IrDA port: constant-address reads
 * from UART3 RHR into the 4 KiB receive bounce buffer.  The magic
 * numbers are the raw port/amode/data-type encodings of the old
 * OMAP1 DMA API.
 */
static void omap1610_irda_start_rx_dma(struct omap1610_irda *si)
{
	/* Configure DMA */
	omap_set_dma_src_params(si->rx_dma_channel, 0x3, 0x0,
				(unsigned long)UART3_RHR);
	omap_enable_dma_irq(si->rx_dma_channel, 0x01);
	omap_set_dma_dest_params(si->rx_dma_channel, 0x0, 0x1,
				 si->rx_buf_dma_phys);
	omap_set_dma_transfer_params(si->rx_dma_channel, 0x0, 4096, 0x1, 0x0);

	omap_start_dma(si->rx_dma_channel);
}
/*
 * PCM trigger for the McBSP-backed stream: start/stop the DMA channel
 * under the runtime-data lock; on start also program the McBSP
 * threshold.  The vendor patch below restricts the "loop until the
 * self-linked channel is really stopped" workaround to playback.
 */
static int omap_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct omap_runtime_data *prtd = runtime->private_data;
	struct omap_pcm_dma_data *dma_data = prtd->dma_data;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&prtd->lock, flags);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		prtd->period_index = 0;
		/* Configure McBSP internal buffer usage */
		if (dma_data->set_threshold)
			dma_data->set_threshold(substream);
		omap_start_dma(prtd->dma_ch);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		prtd->period_index = -1;
		omap_stop_dma(prtd->dma_ch);
#if 0 /* orig */
		/* Since we are using self linking, there is a
		   chance that the DMA as re-enabled the channel
		   just after disabling it */
		while (omap_get_dma_active_status(prtd->dma_ch))
			omap_stop_dma(prtd->dma_ch);
#else /* [email protected], TI patch error in the recording */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			/* Fix: Ensure that the DMA channel is stopped
			   for self linked audio DMA channel */
			while (omap_get_dma_active_status(prtd->dma_ch))
				omap_stop_dma(prtd->dma_ch);
		}
#endif
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&prtd->lock, flags);

	return ret;
}
/*
 * Kick a TX DMA of 'size' bytes from the bounce buffer to the IrDA
 * transmit register (constant destination address).  The magic
 * numbers are the raw port/amode/data-type encodings of the old
 * OMAP DMA API.
 */
static void omap_start_tx_dma(struct omap_irda *omap_ir, int size)
{
	int lch = omap_ir->tx_dma_channel;

	/* Configure DMA */
	omap_set_dma_dest_params(lch, 0x03, 0x0,
				 omap_ir->pdata->dest_start, 0, 0);
	omap_enable_dma_irq(lch, 0x01);
	omap_set_dma_src_params(lch, 0x0, 0x1,
				omap_ir->tx_buf_dma_phys, 0, 0);
	omap_set_dma_transfer_params(lch, 0x0, size, 0x1, 0x0,
				     omap_ir->pdata->tx_trigger, 0);

	/* Start DMA */
	omap_start_dma(lch);
}
/*
 * Write 'count' bytes into the OneNAND BufferRAM.  A DMA path exists
 * below, but the leading "1 ||" forces every transfer through memcpy
 * for now.  Returns 0 on success, -1 on DMA mapping failure.
 */
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used.  Revisit PM requirements before enabling it.
	 * DMA also requires an aligned buffer below high_memory, an
	 * aligned offset and a reasonably large, word-multiple count. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset),
		       buffer, count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	/* 16-bit elements, one frame covering the whole transfer */
	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	/* Completion is signalled by the DMA callback */
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}
static void omap_irda_start_rx_dma(struct omap_irda *omap_ir) { /* Configure DMA */ omap_set_dma_src_params(omap_ir->rx_dma_channel, 0x3, 0x0, omap_ir->pdata->src_start, 0, 0); omap_enable_dma_irq(omap_ir->rx_dma_channel, 0x01); omap_set_dma_dest_params(omap_ir->rx_dma_channel, 0x0, 0x1, omap_ir->rx_buf_dma_phys, 0, 0); omap_set_dma_transfer_params(omap_ir->rx_dma_channel, 0x0, IRDA_SKB_MAX_MTU, 0x1, 0x0, omap_ir->pdata->rx_trigger, 0); omap_start_dma(omap_ir->rx_dma_channel); }
/*
 * PCM trigger: start/stop the audio DMA under the runtime-data lock.
 * On OMAP4 the self-linked channel may re-arm itself right after
 * omap_stop_dma(), so stopping loops until the channel is inactive.
 */
static int omap_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct omap_runtime_data *prtd = runtime->private_data;
	struct omap_pcm_dma_data *dma_data = prtd->dma_data;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&prtd->lock, flags);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		prtd->period_index = 0;
		/* Configure McBSP internal buffer usage */
		if (dma_data->set_threshold)
			dma_data->set_threshold(substream);
		omap_start_dma(prtd->dma_ch);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		prtd->period_index = -1;
		omap_stop_dma(prtd->dma_ch);
		if (cpu_is_omap44xx()) {
			/* Since we are using self linking, there is a
			   chance that the DMA as re-enabled the channel
			   just after disabling it */
			while (omap_get_dma_active_status(prtd->dma_ch))
				omap_stop_dma(prtd->dma_ch);
		}
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&prtd->lock, flags);

	return ret;
}
/*
 * RX DMA polling timer: push newly DMA'd bytes to the tty layer.
 * If no progress was seen, either keep polling (recent activity) or
 * tear down RX DMA and re-enable the RDI interrupt.  When the DMA has
 * reached the buffer end, it is restarted from the buffer start.
 */
static void serial_omap_rxdma_poll(unsigned long uart_no)
{
	struct uart_omap_port *up = ui[uart_no];
	unsigned int curr_dma_pos, curr_transmitted_size;

	curr_dma_pos = omap_get_dma_dst_pos(up->uart_dma.rx_dma_channel);
	if ((curr_dma_pos == up->uart_dma.prev_rx_dma_pos) ||
			(curr_dma_pos == 0)) {
		if (jiffies_to_msecs(jiffies - up->port_activity) <
				up->uart_dma.rx_timeout) {
			/* Recent activity: keep polling */
			mod_timer(&up->uart_dma.rx_timer, jiffies +
				usecs_to_jiffies(up->uart_dma.rx_poll_rate));
		} else {
			/* Line idle: fall back to interrupt-driven RX */
			serial_omap_stop_rxdma(up);
			up->ier |= UART_IER_RDI;
			serial_out(up, UART_IER, up->ier);
		}
		return;
	}

	/* Hand the newly received span to the tty layer */
	curr_transmitted_size = curr_dma_pos -
			up->uart_dma.prev_rx_dma_pos;
	up->port.icount.rx += curr_transmitted_size;
	tty_insert_flip_string(up->port.state->port.tty,
			up->uart_dma.rx_buf +
			(up->uart_dma.prev_rx_dma_pos -
			 up->uart_dma.rx_buf_dma_phys),
			curr_transmitted_size);
	tty_flip_buffer_push(up->port.state->port.tty);

	if (up->uart_dma.rx_buf_size +
			up->uart_dma.rx_buf_dma_phys == curr_dma_pos) {
		/* Buffer exhausted: restart DMA from the buffer start */
		omap_start_dma(up->uart_dma.rx_dma_channel);
		up->uart_dma.prev_rx_dma_pos = up->uart_dma.rx_buf_dma_phys;
	} else
		up->uart_dma.prev_rx_dma_pos = curr_dma_pos;

	mod_timer(&up->uart_dma.rx_timer, jiffies +
			usecs_to_jiffies(up->uart_dma.rx_poll_rate));
	up->port_activity = jiffies;
}
static int hist_buf_dma(struct ispstat *hist) { dma_addr_t dma_addr = hist->active_buf->dma_addr; if (unlikely(!dma_addr)) { dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n"); hist_reset_mem(hist); return STAT_NO_BUF; } isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR); isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR); omap3isp_flush(hist->isp); hist->dma_config.dst_start = dma_addr; hist->dma_config.elem_count = hist->buf_size / sizeof(u32); omap_set_dma_params(hist->dma_ch, &hist->dma_config); omap_start_dma(hist->dma_ch); return STAT_BUF_WAITING_DMA; }
/*
 * Program and start a TX DMA of 'size' bytes from the bounce buffer
 * to the UART3 THR (constant destination address).  The magic numbers
 * are the raw port/amode/data-type encodings of the old OMAP1 DMA API.
 */
static void omap1610_start_tx_dma(struct omap1610_irda *si, int size)
{
	__ECHO_IN;

	/* Configure DMA */
	omap_set_dma_dest_params(si->tx_dma_channel, 0x03, 0x0,
				 (unsigned long)UART3_THR);
	omap_enable_dma_irq(si->tx_dma_channel, 0x01);
	omap_set_dma_src_params(si->tx_dma_channel, 0x0, 0x1,
				si->tx_buf_dma_phys);
	omap_set_dma_transfer_params(si->tx_dma_channel, 0x0, size, 0x1, 0x0);

	HDBG1(1);

	/* Start DMA */
	omap_start_dma(si->tx_dma_channel);

	HDBG1(1);

	__ECHO_OUT;
}
/*
 * Start DMA capture of the active buffer: reset the camera FIFO with
 * the pixel clock stopped, start the DMA channel, then re-enable the
 * clock and release the FIFO reset.
 */
static void start_capture(struct omap1_cam_dev *pcdev)
{
	struct omap1_cam_buf *buf = pcdev->active;
	u32 ctrlclock = CAM_READ_CACHE(pcdev, CTRLCLOCK);
	u32 mode = CAM_READ_CACHE(pcdev, MODE) & ~EN_V_DOWN;

	if (WARN_ON(!buf))
		return;

	/*
	 * Enable start of frame interrupt, which we will use for activating
	 * our end of frame watchdog when capture actually starts.
	 */
	mode |= EN_V_UP;

	if (unlikely(ctrlclock & LCLK_EN))
		/* stop pixel clock before FIFO reset */
		CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock & ~LCLK_EN);
	/* reset FIFO */
	CAM_WRITE(pcdev, MODE, mode | RAZ_FIFO);

	omap_start_dma(pcdev->dma_ch);

	if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
		/*
		 * In SG mode, it's a good moment for fetching next sgbuf
		 * from the current sglist and, if available, already putting
		 * its parameters into the DMA programming register set.
		 */
		try_next_sgbuf(pcdev->dma_ch, buf);
	}

	/* (re)enable pixel clock */
	CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock | LCLK_EN);
	/* release FIFO reset */
	CAM_WRITE(pcdev, MODE, mode);
}
/*
 * Per-period DMA completion handler.  On OMAP1510 (no DMA chaining)
 * the transfer is restarted by hand once the last period completes;
 * in every case the ALSA core is told a period elapsed.
 */
static void omap_pcm_dma_irq(int ch, u16 stat, void *data)
{
	struct snd_pcm_substream *substream = data;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct omap_runtime_data *prtd = runtime->private_data;
	unsigned long flags;

	if (cpu_is_omap1510()) {
		/*
		 * OMAP1510 doesn't support DMA chaining so have to restart
		 * the transfer after all periods are transferred
		 */
		spin_lock_irqsave(&prtd->lock, flags);
		/* period_index < 0 means the stream is stopped */
		if (prtd->period_index >= 0 &&
		    ++prtd->period_index == runtime->periods) {
			prtd->period_index = 0;
			omap_start_dma(prtd->dma_ch);
		}
		spin_unlock_irqrestore(&prtd->lock, flags);
	}

	snd_pcm_period_elapsed(substream);
}
/*
 * (Re)start RX DMA: lazily request and program the channel on first
 * use, restart the transfer, arm the polling timer and, when the
 * platform provides the hook, take an RX wakelock.
 * Returns 0 on success or the omap_request_dma() error code.
 */
static int serial_omap_start_rxdma(struct uart_omap_port *up)
{
	int ret = 0;

	if (up->uart_dma.rx_dma_channel == -1) {
		ret = omap_request_dma(up->uart_dma.uart_dma_rx,
				"UART Rx DMA",
				(void *)uart_rx_dma_callback, up,
				&(up->uart_dma.rx_dma_channel));
		if (ret < 0)
			return ret;

		/* Source: UART data register at a fixed address */
		omap_set_dma_src_params(up->uart_dma.rx_dma_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				up->uart_dma.uart_base, 0, 0);
		omap_set_dma_dest_params(up->uart_dma.rx_dma_channel, 0,
				OMAP_DMA_AMODE_POST_INC,
				up->uart_dma.rx_buf_dma_phys, 0, 0);
		omap_set_dma_transfer_params(up->uart_dma.rx_dma_channel,
				OMAP_DMA_DATA_TYPE_S8,
				up->uart_dma.rx_buf_size, 1,
				OMAP_DMA_SYNC_ELEMENT,
				up->uart_dma.uart_dma_rx, 0);
	}
	up->uart_dma.prev_rx_dma_pos = up->uart_dma.rx_buf_dma_phys;
	/* FIXME: Cache maintenance needed here? */
	omap_start_dma(up->uart_dma.rx_dma_channel);
	mod_timer(&up->uart_dma.rx_timer, jiffies +
			usecs_to_jiffies(up->uart_dma.rx_poll_rate));
	up->uart_dma.rx_dma_used = true;
	if (up->plat_hold_wakelock)
		(up->plat_hold_wakelock(up, WAKELK_RX));
	return ret;
}
/*
 * Move 'u32_count' 32-bit words between memory and the NAND FIFO
 * using DMA.  The FIFO is programmed for 16-byte blocks; the
 * POSTWRITE/PREFETCH control bits select the hardware direction.
 * Waits up to one second for DMA completion and tracks the worst
 * latency seen so far (reported on timeout).
 */
static void omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
				   unsigned int u32_count, int is_write)
{
	const int block_size = 16;
	unsigned int block_count, len;
	int dma_ch;
	unsigned long fifo_reg, timeout, jiffies_before, jiffies_spent;
	static unsigned long max_jiffies = 0;

	dma_ch = omap_nand_dma_ch;
	block_count = u32_count * 4 / block_size;
	nand_write_reg(NND_STATUS, 0x0f);
	nand_write_reg(NND_FIFOCTRL, (block_size << 24) | block_count);
	fifo_reg = NAND_BASE + NND_FIFO;
	if (is_write) {
		/* memory -> FIFO (constant destination address) */
		omap_set_dma_dest_params(dma_ch, OMAP_DMA_PORT_TIPB,
					 OMAP_DMA_AMODE_CONSTANT, fifo_reg,
					 0, 0);
		omap_set_dma_src_params(dma_ch, OMAP_DMA_PORT_EMIFF,
					OMAP_DMA_AMODE_POST_INC,
					virt_to_phys(addr), 0, 0);
//		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
		/* Set POSTWRITE bit */
		nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) | (1 << 16));
	} else {
		/* FIFO -> memory (constant source address) */
		omap_set_dma_src_params(dma_ch, OMAP_DMA_PORT_TIPB,
					OMAP_DMA_AMODE_CONSTANT, fifo_reg,
					0, 0);
		omap_set_dma_dest_params(dma_ch, OMAP_DMA_PORT_EMIFF,
					 OMAP_DMA_AMODE_POST_INC,
					 virt_to_phys(addr), 0, 0);
//		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_8);
		/* Set PREFETCH bit */
		nand_write_reg(NND_CTRL, nand_read_reg(NND_CTRL) | (1 << 17));
	}
	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
				     block_size / 4, block_count,
				     OMAP_DMA_SYNC_FRAME, 0, 0);
	init_completion(&omap_nand_dma_comp);

	len = u32_count << 2;
	/* Write back CPU caches before the engine reads memory */
	dma_cache_maint(addr, len, DMA_TO_DEVICE);
	omap_start_dma(dma_ch);
	jiffies_before = jiffies;
	timeout = wait_for_completion_timeout(&omap_nand_dma_comp,
					      msecs_to_jiffies(1000));
	jiffies_spent = (unsigned long)((long)jiffies - (long)jiffies_before);
	if (jiffies_spent > max_jiffies)
		max_jiffies = jiffies_spent;

	if (timeout == 0) {
		printk(KERN_WARNING "omap-hw-nand: DMA timeout after %u ms, max. seen latency %u ms\n",
		       jiffies_to_msecs(jiffies_spent),
		       jiffies_to_msecs(max_jiffies));
	}
	if (!is_write)
		/* Invalidate so the CPU sees the freshly DMA'd data */
		dma_cache_maint(addr, len, DMA_FROM_DEVICE);

	/* Clear POSTWRITE and PREFETCH again */
	nand_write_reg(NND_CTRL,
		       nand_read_reg(NND_CTRL) & ~((1 << 16) | (1 << 17)));
}
/* * Routine to configure and start DMA for the MMC card */ static int mmc_omap_start_dma_transfer(struct mmc_omap_host *host, struct mmc_request *req) { int sync_dev, sync_dir = 0; int dma_ch = 0, ret = 0, err = 1; struct mmc_data *data = req->data; /* * If for some reason the DMA transfer is still active, * we wait for timeout period and free the dma */ if (host->dma_ch != -1) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(100); if (down_trylock(&host->sem)) { omap_free_dma(host->dma_ch); host->dma_ch = -1; up(&host->sem); return err; } } else { if (down_trylock(&host->sem)) return err; } if (!(data->flags & MMC_DATA_WRITE)) { host->dma_dir = DMA_FROM_DEVICE; if (host->id == OMAP_MMC1_DEVID) sync_dev = OMAP24XX_DMA_MMC1_RX; else sync_dev = OMAP24XX_DMA_MMC2_RX; } else { host->dma_dir = DMA_TO_DEVICE; if (host->id == OMAP_MMC1_DEVID) sync_dev = OMAP24XX_DMA_MMC1_TX; else sync_dev = OMAP24XX_DMA_MMC2_TX; } ret = omap_request_dma(sync_dev, "MMC/SD", mmc_omap_dma_cb, host, &dma_ch); if (ret != 0) { dev_dbg(mmc_dev(host->mmc), "%s: omap_request_dma() failed with %d\n", mmc_hostname(host->mmc), ret); return ret; } host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); host->dma_ch = dma_ch; if (!(data->flags & MMC_DATA_WRITE)) mmc_omap_config_dma_param(1, host, data); else mmc_omap_config_dma_param(0, host, data); if ((data->blksz % 4) == 0) omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, (data->blksz / 4), data->blocks, OMAP_DMA_SYNC_FRAME, sync_dev, sync_dir); else /* REVISIT: The MMC buffer increments only when MSB is written. * Return error for blksz which is non multiple of four. */ return -EINVAL; omap_start_dma(dma_ch); return 0; }
/*
 * Start TX: PIO mode just enables the THR interrupt inside a
 * runtime-PM window (with an optional platform no-idle constraint);
 * DMA mode lazily requests the TX channel, marks it busy, and
 * programs/starts a transfer over the contiguous run of the circular
 * xmit buffer.
 */
static void serial_omap_start_tx(struct uart_port *port)
{
	struct uart_omap_port *up = (struct uart_omap_port *)port;
	struct omap_uart_port_info *pdata = up->pdev->dev.platform_data;
	struct circ_buf *xmit;
	unsigned int start;
	int ret = 0;

	if (!up->use_dma) {
		pm_runtime_get_sync(&up->pdev->dev);
		serial_omap_enable_ier_thri(up);
		if (pdata && pdata->set_noidle)
			pdata->set_noidle(up->pdev);
		pm_runtime_mark_last_busy(&up->pdev->dev);
		pm_runtime_put_autosuspend(&up->pdev->dev);
		return;
	}

	/* A DMA transfer is already in flight; its callback continues it */
	if (up->uart_dma.tx_dma_used)
		return;

	xmit = &up->port.state->xmit;

	if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) {
		pm_runtime_get_sync(&up->pdev->dev);
		ret = omap_request_dma(up->uart_dma.uart_dma_tx,
				"UART Tx DMA",
				(void *)uart_tx_dma_callback, up,
				&(up->uart_dma.tx_dma_channel));
		if (ret < 0) {
			/* No channel available: fall back to IRQ-driven TX */
			serial_omap_enable_ier_thri(up);
			return;
		}
	}
	spin_lock(&(up->uart_dma.tx_lock));
	up->uart_dma.tx_dma_used = true;
	spin_unlock(&(up->uart_dma.tx_lock));

	start = up->uart_dma.tx_buf_dma_phys +
			(xmit->tail & (UART_XMIT_SIZE - 1));

	up->uart_dma.tx_buf_size = uart_circ_chars_pending(xmit);
	/* Clamp to the contiguous run up to the buffer end; the wrapped
	 * remainder is sent by a follow-up transfer */
	if (start + up->uart_dma.tx_buf_size >=
			up->uart_dma.tx_buf_dma_phys + UART_XMIT_SIZE)
		up->uart_dma.tx_buf_size =
			(up->uart_dma.tx_buf_dma_phys +
			 UART_XMIT_SIZE) - start;

	/* Destination: UART data register at a fixed address */
	omap_set_dma_dest_params(up->uart_dma.tx_dma_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				up->uart_dma.uart_base, 0, 0);
	/* Source: walk forward through the xmit buffer */
	omap_set_dma_src_params(up->uart_dma.tx_dma_channel, 0,
				OMAP_DMA_AMODE_POST_INC, start, 0, 0);
	omap_set_dma_transfer_params(up->uart_dma.tx_dma_channel,
				OMAP_DMA_DATA_TYPE_S8,
				up->uart_dma.tx_buf_size, 1,
				OMAP_DMA_SYNC_ELEMENT,
				up->uart_dma.uart_dma_tx, 0);
	omap_start_dma(up->uart_dma.tx_dma_channel);
}
static void OMAPLFBFliepNoLock_HDMI(OMAPLFB_SWAPCHAIN *psSwapChain, OMAPLFB_DEVINFO *psDevInfo, struct omapfb_info *ofbi, struct fb_info *framebuffer, unsigned long fb_offset) { struct omap_overlay *ovl_hdmi; struct omap_overlay_info info; struct omap_overlay *hdmi; struct omap_overlay_manager *manager; bool overlay_change_requested = false; enum omap_dss_overlay_s3d_type s3d_type_in_video_hdmi = omap_dss_overlay_s3d_none; ovl_hdmi = omap_dss_get_overlay(3); if(ovl_hdmi->info.enabled) s3d_type_in_video_hdmi = ovl_hdmi->info.s3d_type; hdmi = psSwapChain->stHdmiTiler.overlay; manager = hdmi->manager; hdmi->get_overlay_info(hdmi, &info); //not good... if ( omap_overlay_info_req[hdmi->id].status==2 ) { info.enabled = omap_overlay_info_req[hdmi->id].enabled; info.rotation = omap_overlay_info_req[hdmi->id].rotation; info.pos_x = omap_overlay_info_req[hdmi->id].pos_x; info.pos_y = omap_overlay_info_req[hdmi->id].pos_y; info.out_width = omap_overlay_info_req[hdmi->id].out_width; info.out_height = omap_overlay_info_req[hdmi->id].out_height; info.global_alpha = omap_overlay_info_req[hdmi->id].global_alpha; info.zorder = omap_overlay_info_req[hdmi->id].zorder; printk("GUI HDMI layer change requested. 
req_enabled(%d)\n", omap_overlay_info_req[hdmi->id].enabled); omap_overlay_info_req[hdmi->id].status = 0; overlay_change_requested = true; } if ( info.enabled ) { mutex_lock(&psSwapChain->stHdmiTiler.lock); if ( !psSwapChain->stHdmiTiler.alloc ) { // if ( AllocTilerForHdmi(psSwapChain, psDevInfo) ) { ERROR_PRINTK("Tiler memory for HDMI GUI cloning is not allocated\n"); mutex_unlock(&psSwapChain->stHdmiTiler.lock); return; // } } if ( psSwapChain->stHdmiTiler.alloc ) //if Tiler memory is allocated { unsigned long line_offset; unsigned long w, h; unsigned long src_stride, dst_stride; unsigned long i; unsigned char *dst, *src; unsigned long pStride; u32 j, *src_4, *dst_4, *dst1_4; int ch; src_stride = psDevInfo->sFBInfo.ulByteStride; dst_stride = psSwapChain->stHdmiTiler.vStride; line_offset = fb_offset / src_stride; h = psDevInfo->sFBInfo.ulHeight; w = psDevInfo->sFBInfo.ulWidth; pStride = psSwapChain->stHdmiTiler.pStride; //Copy dst = (unsigned char*)psSwapChain->stHdmiTiler.vAddr + (line_offset * dst_stride); src = (unsigned char*)framebuffer->screen_base + fb_offset; DEBUG_PRINTK("Copy Start h:%d, src:0x%p src_stride:%d, dst:0x%p dst_stride:%d, line offset:%d, pStride:%d\n", h, src, src_stride, dst, dst_stride, line_offset, pStride); if( psSwapChain->s3d_type==omap_dss_overlay_s3d_side_by_side && s3d_type_in_video_hdmi == omap_dss_overlay_s3d_top_bottom) { for(j=0; j<h/2; j++) { src_4 = (u32 *)(src + src_stride*j); dst_4 = (u32 *)(dst + dst_stride*2*j); dst1_4 = (u32 *)(dst + dst_stride*2*j + w*2); for(i=0;i<w/2;i++) { *dst_4++ = *src_4; *dst1_4++ = *src_4++; src_4++; } src_4 = (u32 *)(src + src_stride*j); dst_4 = (u32 *)(dst + dst_stride*(2*j+1)); dst1_4 = (u32 *)(dst + dst_stride*(2*j+1) + w*2); for(i=0;i<w/2;i++) { *dst_4++ = *src_4; *dst1_4++ = *src_4++; src_4++; } } info.s3d_type = omap_dss_overlay_s3d_top_bottom; } else { if(hdmi_dma.state == HDMI_DMA_DONE) { ch = hdmi_dma.frame_pos; hdmi->get_overlay_info(hdmi, &hdmi_dma.info[ch]); hdmi_dma.hdmi = hdmi; 
printk("S:%x\n", psSwapChain->stHdmiTiler.pAddr + (h * pStride * ch)); omap_set_dma_transfer_params(hdmi_dma.lch, OMAP_DMA_DATA_TYPE_S32, src_stride>>2, h, 0, 0, 0); omap_set_dma_src_params(hdmi_dma.lch, 0, OMAP_DMA_AMODE_POST_INC, framebuffer->fix.smem_start + fb_offset, 1, 1); omap_set_dma_dest_params(hdmi_dma.lch, 0, OMAP_DMA_AMODE_DOUBLE_IDX, psSwapChain->stHdmiTiler.pAddr + (h * pStride * ch), 1, pStride - src_stride + 1 ); omap_dma_set_prio_lch(hdmi_dma.lch, DMA_CH_PRIO_HIGH, DMA_CH_PRIO_HIGH); omap_set_dma_src_burst_mode(hdmi_dma.lch, OMAP_DMA_DATA_BURST_16); omap_set_dma_dest_burst_mode(hdmi_dma.lch, OMAP_DMA_DATA_BURST_16); omap_start_dma(hdmi_dma.lch); hdmi_dma.state = HDMI_DMA_TRANSFERRING; if ( omap_overlay_info_req[hdmi->id].status==2 ) { hdmi_dma.info[ch].enabled = omap_overlay_info_req[hdmi->id].enabled; hdmi_dma.info[ch].rotation = omap_overlay_info_req[hdmi->id].rotation; hdmi_dma.info[ch].pos_x = omap_overlay_info_req[hdmi->id].pos_x; hdmi_dma.info[ch].pos_y = omap_overlay_info_req[hdmi->id].pos_y; hdmi_dma.info[ch].out_width = omap_overlay_info_req[hdmi->id].out_width; hdmi_dma.info[ch].out_height = omap_overlay_info_req[hdmi->id].out_height; hdmi_dma.info[ch].global_alpha = omap_overlay_info_req[hdmi->id].global_alpha; hdmi_dma.info[ch].zorder = omap_overlay_info_req[hdmi->id].zorder; } //fill info //@todo not good find another way later hdmi_dma.info[ch].color_mode = ofbi->overlays[0]->info.color_mode; hdmi_dma.info[ch].paddr = psSwapChain->stHdmiTiler.pAddr + (h * pStride * ch); hdmi_dma.info[ch].vaddr = NULL; //no need if ( hdmi_dma.info[ch].rotation==OMAP_DSS_ROT_90 || hdmi_dma.info[ch].rotation==OMAP_DSS_ROT_270 ) { hdmi_dma.info[ch].width = h; hdmi_dma.info[ch].height = w; hdmi_dma.info[ch].screen_width = h; } else { hdmi_dma.info[ch].width =w; hdmi_dma.info[ch].height = h; hdmi_dma.info[ch].screen_width = w; } hdmi_dma.info[ch].rotation_type = OMAP_DSS_ROT_TILER; hdmi_dma.info[ch].s3d_type = psSwapChain->s3d_type; hdmi_dma.curr_frame = 
hdmi_dma.frame_pos; if( ++hdmi_dma.frame_pos >= HDMI_DMA_MAX ) hdmi_dma.frame_pos = 0; if( omap_overlay_info_req[hdmi->id].status == 2) { omap_overlay_info_req[hdmi->id].status = 0; } } else printk("DOLCOM : DMA busy!!!!!!!!!!!!!!!!!\n");