/*
 * Read a 32-bit APB register through the APB DMA channel.
 *
 * Some Tegra APB registers cannot be reliably accessed by the CPU directly,
 * so the value is transferred by DMA into a bounce buffer (tegra_apb_bb /
 * tegra_apb_bb_phys) and read from there.
 *
 * Falls back to a plain MMIO read when the DMA channel is unavailable and
 * cannot be brought up.  Sleeps (mutex + completion wait), so this must be
 * called from process context.  Returns 0 if the DMA transfer times out.
 */
u32 tegra_apb_readl(unsigned long offset)
{
	struct tegra_dma_req req;
	int ret;

	/* No DMA channel and init fails -> direct MMIO read. */
	if (!tegra_apb_dma && !tegra_apb_init())
		return readl(IO_TO_VIRT(offset));

	/* Serialize use of the single shared bounce buffer. */
	mutex_lock(&tegra_apb_dma_lock);

	/* One 32-bit word, APB register -> bounce buffer. */
	req.complete = apb_dma_complete;
	req.to_memory = 1;
	req.dest_addr = tegra_apb_bb_phys;
	req.dest_bus_width = 32;
	req.dest_wrap = 1;
	req.source_addr = offset;
	req.source_bus_width = 32;
	req.source_wrap = 4;	/* keep re-reading the same APB address */
	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
	req.size = 4;

	/* Re-arm the completion before enqueueing so we cannot miss it. */
	INIT_COMPLETION(tegra_apb_wait);

	tegra_dma_enqueue_req(tegra_apb_dma, &req);

	ret = wait_for_completion_timeout(&tegra_apb_wait,
		msecs_to_jiffies(50));
	if (WARN(ret == 0, "apb read dma timed out")) {
		/* Cancel the stuck request and report 0 to the caller. */
		tegra_dma_dequeue_req(tegra_apb_dma, &req);
		*(u32 *)tegra_apb_bb = 0;
	}

	mutex_unlock(&tegra_apb_dma_lock);
	return *((u32 *)tegra_apb_bb);
}
static void tegra_start_rx(struct uart_port *u) { struct tegra_uart_port *t; unsigned char ier; t = container_of(u, struct tegra_uart_port, uport); if (t->rts_active) set_rts(t, true); if (!t->rx_in_progress) { wait_sym_time(t, 1); /* wait a character interval */ /* Clear the received Bytes from FIFO */ tegra_fifo_reset(t, UART_FCR_CLEAR_RCVR); uart_readb(t, UART_LSR); ier = 0; ier |= (UART_IER_RLSI | UART_IER_RTOIE); if (t->use_rx_dma) ier |= UART_IER_EORD; else ier |= UART_IER_RDI; t->ier_shadow |= ier; uart_writeb(t, t->ier_shadow, UART_IER); t->rx_in_progress = 1; if (t->use_rx_dma && t->rx_dma) tegra_dma_enqueue_req(t->rx_dma, &t->rx_dma_req); tty_flip_buffer_push(u->state->port.tty); } return; }
/*
 * Write a 32-bit value to an APB register through the APB DMA channel.
 *
 * Mirror of tegra_apb_readl(): the value is staged in the bounce buffer
 * and DMA'd out to the APB register.  Sleeps (mutex + completion wait),
 * so this must be called from process context.
 *
 * Fixes vs. the previous version, for consistency with the read path:
 *  - attempt tegra_apb_init() before falling back to a plain MMIO write,
 *    exactly as tegra_apb_readl() does;
 *  - use TEGRA_DMA_REQ_SEL_CNTR as the DMA request selector instead of 0,
 *    matching the selector the read path uses for APB transfers.
 */
static inline void apb_writel(u32 value, unsigned long offset)
{
	struct tegra_dma_req req;
	int ret;

	/* No DMA channel and init fails -> direct MMIO write. */
	if (!tegra_apb_dma && !tegra_apb_init()) {
		writel(value, IO_TO_VIRT(offset));
		return;
	}

	/* Serialize use of the single shared bounce buffer. */
	mutex_lock(&tegra_apb_dma_lock);

	/* Stage the value, then DMA one 32-bit word bounce buffer -> APB. */
	*((u32 *)tegra_apb_bb) = value;

	req.complete = apb_dma_complete;
	req.to_memory = 0;
	req.dest_addr = offset;
	req.dest_wrap = 4;	/* keep re-writing the same APB address */
	req.dest_bus_width = 32;
	req.source_addr = tegra_apb_bb_phys;
	req.source_bus_width = 32;
	req.source_wrap = 1;
	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
	req.size = 4;

	/* Re-arm the completion before enqueueing so we cannot miss it. */
	INIT_COMPLETION(tegra_apb_wait);

	tegra_dma_enqueue_req(tegra_apb_dma, &req);

	ret = wait_for_completion_timeout(&tegra_apb_wait,
		msecs_to_jiffies(400));
	if (WARN(ret == 0, "apb write dma timed out"))
		tegra_dma_dequeue_req(tegra_apb_dma, &req);

	mutex_unlock(&tegra_apb_dma_lock);
}
static void tegra_pcm_queue_dma(struct tegra_runtime_data *prtd) { struct snd_pcm_substream *substream = prtd->substream; struct snd_dma_buffer *buf = &substream->dma_buffer; struct tegra_dma_req *dma_req; unsigned long addr; dma_req = &prtd->dma_req[prtd->dma_req_idx]; if (++prtd->dma_req_idx >= prtd->dma_req_count) prtd->dma_req_idx -= prtd->dma_req_count; if (prtd->avp_dma_addr) addr = prtd->avp_dma_addr + prtd->dma_pos; else addr = buf->addr + prtd->dma_pos; prtd->dma_pos += dma_req->size; if (prtd->dma_pos >= prtd->dma_pos_end) prtd->dma_pos = 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) dma_req->source_addr = addr; else dma_req->dest_addr = addr; tegra_dma_enqueue_req(prtd->dma_chan, dma_req); }
/*
 * Kick off an RX DMA transfer by enqueueing the pre-built RX request.
 *
 * Returns 0 on success, -EINVAL if the request could not be enqueued.
 */
static int tegra_start_dma_rx(struct tegra_uart_port *t)
{
	int err;

	/* Make sure all prior buffer setup is visible before the DMA
	 * engine is started. */
	wmb();

	err = tegra_dma_enqueue_req(t->rx_dma, &t->rx_dma_req);
	if (err) {
		dev_err(t->uport.dev, "Could not enqueue Rx DMA req\n");
		return -EINVAL;
	}

	return 0;
}
static int tegra_start_dma_rx(struct tegra_uart_port *t) { wmb(); dma_sync_single_for_device(t->uport.dev, t->rx_dma_req.dest_addr, t->rx_dma_req.size, DMA_TO_DEVICE); if (tegra_dma_enqueue_req(t->rx_dma, &t->rx_dma_req)) { dev_err(t->uport.dev, "Could not enqueue Rx DMA req\n"); return -EINVAL; } return 0; }
static void tegra_start_dma_tx(struct tegra_uart_port *t, unsigned long bytes) { struct circ_buf *xmit; xmit = &t->uport.state->xmit; dma_sync_single_for_device(t->uport.dev, t->xmit_dma_addr, UART_XMIT_SIZE, DMA_TO_DEVICE); t->fcr_shadow &= ~UART_FCR_T_TRIG_11; t->fcr_shadow |= TEGRA_UART_TX_TRIG_4B; uart_writeb(t, t->fcr_shadow, UART_FCR); t->tx_bytes = bytes & ~(sizeof(u32)-1); t->tx_dma_req.source_addr = t->xmit_dma_addr + xmit->tail; t->tx_dma_req.size = t->tx_bytes; t->tx_in_progress = TEGRA_TX_DMA; tegra_dma_enqueue_req(t->tx_dma, &t->tx_dma_req); }