static int tegra_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_pcm_runtime *runtime = substream->runtime; struct tegra_runtime_data *prtd = runtime->private_data; unsigned long flags; switch (cmd) { case SNDRV_PCM_TRIGGER_START: prtd->dma_pos = 0; prtd->dma_pos_end = frames_to_bytes(runtime, runtime->periods * runtime->period_size); prtd->period_index = 0; prtd->dma_req_idx = 0; case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: spin_lock_irqsave(&prtd->lock, flags); prtd->running = 1; #if 0 if( prtd->dma_chan ){ prtd->pcm_timeout_tick = jiffies; del_timer(&prtd->pcm_timeout); prtd->pcm_timeout.function = pcm_timeout_func; prtd->pcm_timeout.expires = jiffies + msecs_to_jiffies((runtime->period_size/(runtime->rate/1000))*runtime->periods*2); prtd->pcm_timeout.data = (unsigned long)prtd; add_timer(&prtd->pcm_timeout); prtd->callback_time = jiffies; } #endif spin_unlock_irqrestore(&prtd->lock, flags); if( prtd->dma_chan ){ tegra_pcm_queue_dma(prtd); tegra_pcm_queue_dma(prtd); } break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: spin_lock_irqsave(&prtd->lock, flags); prtd->running = 0; #if 0 if( prtd->dma_chan ){ del_timer(&prtd->pcm_timeout); } #endif spin_unlock_irqrestore(&prtd->lock, flags); if( prtd->dma_chan ){ tegra_dma_dequeue_req(prtd->dma_chan, &prtd->dma_req[0]); tegra_dma_dequeue_req(prtd->dma_chan, &prtd->dma_req[1]); } break; default: return -EINVAL; } return 0; }
/*
 * Stop the receive side of the UART port.
 *
 * Shutdown order matters: RTS is deasserted first so the remote end stops
 * transmitting, then one character interval is waited so any in-flight byte
 * lands, then RX interrupts are masked, and finally data already received
 * is drained (via DMA dequeue or PIO read) and pushed to the TTY layer.
 */
static void tegra_stop_rx(struct uart_port *u)
{
	struct tegra_uart_port *t;
	unsigned char ier;

	t = container_of(u, struct tegra_uart_port, uport);

	/* Throttle the sender before tearing RX down. */
	if (t->rts_active)
		set_rts(t, false);

	if (t->rx_in_progress) {
		wait_sym_time(t, 1); /* wait a character interval */

		/* Mask RX data, line-status, RX-timeout and end-of-rx-data
		 * interrupts; keep the shadow copy in sync with hardware. */
		ier = t->ier_shadow;
		ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
							UART_IER_EORD);
		t->ier_shadow = ier;
		uart_writeb(t, ier, UART_IER);
		t->rx_in_progress = 0;

		/* Drain whatever was already received into the TTY buffer. */
		if (t->use_rx_dma && t->rx_dma)
			tegra_dma_dequeue_req(t->rx_dma, &t->rx_dma_req);
		else
			do_handle_rx_pio(t);

		tty_flip_buffer_push(u->state->port.tty);
	}
	return;
}
u32 tegra_apb_readl(unsigned long offset) { struct tegra_dma_req req; int ret; if (!tegra_apb_dma && !tegra_apb_init()) return readl(IO_TO_VIRT(offset)); mutex_lock(&tegra_apb_dma_lock); req.complete = apb_dma_complete; req.to_memory = 1; req.dest_addr = tegra_apb_bb_phys; req.dest_bus_width = 32; req.dest_wrap = 1; req.source_addr = offset; req.source_bus_width = 32; req.source_wrap = 4; req.req_sel = TEGRA_DMA_REQ_SEL_CNTR; req.size = 4; INIT_COMPLETION(tegra_apb_wait); tegra_dma_enqueue_req(tegra_apb_dma, &req); ret = wait_for_completion_timeout(&tegra_apb_wait, msecs_to_jiffies(50)); if (WARN(ret == 0, "apb read dma timed out")) { tegra_dma_dequeue_req(tegra_apb_dma, &req); *(u32 *)tegra_apb_bb = 0; } mutex_unlock(&tegra_apb_dma_lock); return *((u32 *)tegra_apb_bb); }
/*
 * Write a 32-bit value to an APB register through the APB DMA channel.
 *
 * The value is staged in the bounce buffer (tegra_apb_bb) and moved to
 * the register by a one-word DMA request.  Falls back to a direct
 * writel() if the DMA channel is unavailable.
 *
 * NOTE(review): unlike tegra_apb_readl(), this path does not attempt
 * tegra_apb_init() when the channel is NULL, and uses req_sel = 0
 * rather than TEGRA_DMA_REQ_SEL_CNTR — confirm whether these
 * asymmetries are intentional.
 */
static inline void apb_writel(u32 value, unsigned long offset)
{
	/* Zero-initialise so fields not assigned below (list linkage,
	 * status, flags) do not carry stack garbage into the driver. */
	struct tegra_dma_req req = { 0 };
	int ret;

	if (!tegra_apb_dma) {
		writel(value, IO_TO_VIRT(offset));
		return;
	}

	mutex_lock(&tegra_apb_dma_lock);
	*((u32 *)tegra_apb_bb) = value;
	req.complete = apb_dma_complete;
	req.to_memory = 0;
	req.dest_addr = offset;
	req.dest_wrap = 4;
	req.dest_bus_width = 32;
	req.source_addr = tegra_apb_bb_phys;
	req.source_bus_width = 32;
	req.source_wrap = 1;
	req.req_sel = 0;
	req.size = 4;

	INIT_COMPLETION(tegra_apb_wait);

	tegra_dma_enqueue_req(tegra_apb_dma, &req);
	ret = wait_for_completion_timeout(&tegra_apb_wait,
					  msecs_to_jiffies(400));

	if (WARN(ret == 0, "apb write dma timed out"))
		tegra_dma_dequeue_req(tegra_apb_dma, &req);

	mutex_unlock(&tegra_apb_dma_lock);
}
/*
 * Dequeue the request at the head of the channel's pending list.
 *
 * NOTE(review): no locking is taken here — assumes the caller
 * serialises access to ch->list; confirm against the driver's
 * locking scheme.
 */
void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	/* Guard against an empty list: list_entry() on the list head
	 * itself would hand a bogus request to tegra_dma_dequeue_req(). */
	if (list_empty(&ch->list))
		return;

	req = list_entry(ch->list.next, typeof(*req), node);
	tegra_dma_dequeue_req(ch, req);
}
static int tegra_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_pcm_runtime *runtime = substream->runtime; struct tegra_runtime_data *prtd = runtime->private_data; unsigned long flags; switch (cmd) { case SNDRV_PCM_TRIGGER_START: prtd->dma_pos = 0; prtd->dma_pos_end = frames_to_bytes(runtime, runtime->periods * runtime->period_size); prtd->period_index = 0; prtd->dma_req_idx = 0; /* Fall-through */ case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: #ifdef CONFIG_HAS_WAKELOCK wake_lock(&prtd->tegra_wake_lock); #endif spin_lock_irqsave(&prtd->lock, flags); prtd->running = 1; spin_unlock_irqrestore(&prtd->lock, flags); tegra_pcm_queue_dma(prtd); tegra_pcm_queue_dma(prtd); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: spin_lock_irqsave(&prtd->lock, flags); prtd->running = 0; spin_unlock_irqrestore(&prtd->lock, flags); tegra_dma_dequeue_req(prtd->dma_chan, &prtd->dma_req[0]); tegra_dma_dequeue_req(prtd->dma_chan, &prtd->dma_req[1]); #ifdef CONFIG_HAS_WAKELOCK wake_unlock(&prtd->tegra_wake_lock); #endif break; default: return -EINVAL; } return 0; }
/* Stop transmission on the port: cancel any in-flight TX DMA request. */
static void tegra_stop_tx(struct uart_port *u)
{
	struct tegra_uart_port *t = container_of(u, struct tegra_uart_port,
						 uport);

	if (!t->use_tx_dma)
		return;

	tegra_dma_dequeue_req(t->tx_dma, &t->tx_dma_req);
}
/* Lock already taken */
/*
 * Service a completed/pending RX DMA transfer.
 *
 * Sequence matters: RTS is dropped so no new data arrives while the
 * current request is pulled out of the DMA driver, the received bytes
 * are pushed to the TTY layer, a fresh DMA request is enqueued, and
 * only then is RTS re-asserted to resume the remote sender.
 */
static void do_handle_rx_dma(struct tegra_uart_port *t)
{
	struct uart_port *u = &t->uport;
	if (t->rts_active)
		set_rts(t, false);
	/* Pull the request back; its data is flushed to the TTY below. */
	tegra_dma_dequeue_req(t->rx_dma, &t->rx_dma_req);
	tty_flip_buffer_push(u->state->port.tty);
	/* enqueue the request again */
	tegra_start_dma_rx(t);
	if (t->rts_active)
		set_rts(t, true);
}
/*
 * Flush any TX data submitted for DMA and PIO.  Called when the
 * TX circular buffer is reset.
 */
static void tegra_flush_buffer(struct uart_port *u)
{
	struct tegra_uart_port *t = container_of(u, struct tegra_uart_port,
						 uport);

	dev_vdbg(u->dev, "%s called", __func__);

	t->tx_bytes = 0;

	if (!t->use_tx_dma)
		return;

	/* Cancel the outstanding DMA request and mark it as drained. */
	tegra_dma_dequeue_req(t->tx_dma, &t->tx_dma_req);
	t->tx_dma_req.size = 0;
}