/*
 * tegra_shutdown - serial core ->shutdown hook: tear down the port.
 *
 * Teardown order matters: stop any IrDA transceiver first, quiesce the
 * controller hardware, drop the in-progress flags, release DMA resources,
 * and only then free the IRQ and kill the tasklet.
 */
static void tegra_shutdown(struct uart_port *u)
{
	struct tegra_uart_port *t;

	t = container_of(u, struct tegra_uart_port, uport);
	dev_vdbg(u->dev, "+tegra_shutdown\n");

	/* Give the IrDA transceiver a chance to power down first. */
	if (t->is_irda && t->irda_shutdown)
		t->irda_shutdown();

	tegra_uart_hw_deinit(t);

	t->rx_in_progress = 0;
	t->tx_in_progress = 0;

	tegra_uart_free_rx_dma(t);
	if (t->use_tx_dma) {
		tegra_dma_free_channel(t->tx_dma);
		t->tx_dma = NULL;
		t->use_tx_dma = false;

		/* Undo the TX DMA mapping of the xmit circular buffer. */
		dma_unmap_single(t->uport.dev, t->xmit_dma_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);
		t->xmit_dma_addr = 0;
	}

	free_irq(u->irq, t);
	/* Ensure the tasklet cannot run after the port state is gone. */
	tasklet_kill(&t->tlet);
	dev_vdbg(u->dev, "-tegra_shutdown\n");
}
bool tegra_apb_init(void) { struct tegra_dma_channel *ch; mutex_lock(&tegra_apb_dma_lock); if (tegra_apb_dma) goto out; ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT | TEGRA_DMA_SHARED); if (!ch) goto out_fail; tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32), &tegra_apb_bb_phys, GFP_KERNEL); if (!tegra_apb_bb) { pr_err("%s: can not allocate bounce buffer\n", __func__); tegra_dma_free_channel(ch); goto out_fail; } tegra_apb_dma = ch; out: mutex_unlock(&tegra_apb_dma_lock); return true; out_fail: mutex_unlock(&tegra_apb_dma_lock); return false; }
/*
 * tegra_pcm_open - ALSA PCM ->open: allocate per-stream runtime data and
 * a continuous-mode DMA channel, then install hardware constraints.
 *
 * Returns 0 on success or a negative errno; every allocation made here is
 * rolled back on the error path.
 */
static int tegra_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct tegra_runtime_data *prtd;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct tegra_pcm_dma_params * dmap;
	int ret = 0;

	prtd = kzalloc(sizeof(struct tegra_runtime_data), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;

	runtime->private_data = prtd;
	prtd->substream = substream;
	spin_lock_init(&prtd->lock);

	/* Only grab a DMA channel when the DAI actually provides DMA data. */
	dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
	if (dmap) {
		/*
		 * NOTE(review): only .dev is initialized on the requests
		 * here; confirm the remaining dma_req fields are configured
		 * elsewhere before the channel is started.
		 */
		prtd->dma_req[0].dev = prtd;
		prtd->dma_req[1].dev = prtd;

		prtd->dma_chan = tegra_dma_allocate_channel(
					TEGRA_DMA_MODE_CONTINUOUS_SINGLE,
					"pcm");
		if (prtd->dma_chan == NULL) {
			ret = -ENOMEM;
			goto err;
		}
	}

	/* Set HW params now that initialization is complete */
	snd_soc_set_runtime_hwparams(substream, &tegra_pcm_hardware);

	/* Ensure period size is multiple of 8 */
	ret = snd_pcm_hw_constraint_step(runtime, 0,
			SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 0x8);
	if (ret < 0)
		goto err;

	/* Ensure that buffer size is a multiple of period size */
	ret = snd_pcm_hw_constraint_integer(runtime,
			SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		goto err;

	return 0;

err:
	if (prtd->dma_chan) {
		tegra_dma_free_channel(prtd->dma_chan);
	}
	kfree(prtd);
	return ret;
}
/*
 * tegra_uart_free_rx_dma - release the RX DMA channel, if one is in use,
 * and clear the port's RX DMA bookkeeping.
 */
static void tegra_uart_free_rx_dma(struct tegra_uart_port *t)
{
	if (t->use_rx_dma) {
		tegra_dma_free_channel(t->rx_dma);
		t->rx_dma = NULL;
		t->use_rx_dma = false;
	}
}
/*
 * tegra_pcm_open - ALSA PCM ->open: allocate per-stream runtime data,
 * configure a pair of DMA requests for the stream direction, and grab a
 * oneshot DMA channel.
 *
 * Returns 0 on success or a negative errno; allocations are rolled back
 * on the error path.
 */
static int tegra_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct tegra_runtime_data *prtd;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct tegra_pcm_dma_params * dmap;
	int ret = 0;

	prtd = kzalloc(sizeof(struct tegra_runtime_data), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;

	runtime->private_data = prtd;
	prtd->substream = substream;
	spin_lock_init(&prtd->lock);

	/*
	 * Configure both DMA requests for the stream direction.
	 * NOTE(review): dmap is not checked for NULL before use here —
	 * presumably the CPU DAI always provides DMA data; verify.
	 */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
		setup_dma_tx_request(&prtd->dma_req[0], dmap);
		setup_dma_tx_request(&prtd->dma_req[1], dmap);
	} else {
		dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
		setup_dma_rx_request(&prtd->dma_req[0], dmap);
		setup_dma_rx_request(&prtd->dma_req[1], dmap);
	}

	prtd->dma_req[0].dev = prtd;
	prtd->dma_req[1].dev = prtd;

	prtd->dma_chan = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
	if (prtd->dma_chan == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	/* Install hardware capabilities on the runtime. */
	snd_soc_set_runtime_hwparams(substream, &tegra_pcm_hardware);

	/* Ensure that buffer size is a multiple of period size. */
	ret = snd_pcm_hw_constraint_integer(runtime,
			SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		goto err;

	return 0;

err:
	if (prtd->dma_chan) {
		tegra_dma_free_channel(prtd->dma_chan);
	}
	kfree(prtd);
	return ret;
}
/*
 * tegra_pcm_close - ALSA PCM ->close: release the DMA channel and the
 * per-stream runtime data allocated in tegra_pcm_open().
 *
 * Fix: guard the channel free with a NULL check. The open path only
 * allocates dma_chan when the DAI provides DMA data, so dma_chan may
 * legitimately be NULL here; the sibling close implementations in this
 * file already guard the same way.
 */
static int tegra_pcm_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct tegra_runtime_data *prtd = runtime->private_data;

	if (prtd->dma_chan)
		tegra_dma_free_channel(prtd->dma_chan);
	kfree(prtd);
	return 0;
}
/*
 * tegra_pcm_close - ALSA PCM ->close: stop the timeout timer, release the
 * DMA channel (if any), and free the per-stream runtime data.
 */
static int tegra_pcm_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct tegra_runtime_data *prtd = runtime->private_data;

	/* Make sure the timer handler is not running before freeing prtd. */
	del_timer_sync(&prtd->pcm_timeout);
	prtd->callback_time = 0;

	if (prtd->dma_chan)
		tegra_dma_free_channel(prtd->dma_chan);
	kfree(prtd);
	return 0;
}
static int tegra_pcm_close(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct tegra_runtime_data *prtd = runtime->private_data; #ifdef CONFIG_HAS_WAKELOCK wake_lock_destroy(&prtd->tegra_wake_lock); #endif if (prtd->dma_chan) tegra_dma_free_channel(prtd->dma_chan); kfree(prtd); return 0; }
/*
 * tegra_uart_free_rx_dma - release the RX DMA channel and its coherent
 * buffer, if RX DMA was in use, and reset the RX DMA bookkeeping.
 */
static void tegra_uart_free_rx_dma(struct tegra_uart_port *t)
{
	if (!t->use_rx_dma)
		return;

	tegra_dma_free_channel(t->rx_dma);
	t->rx_dma = NULL;

	/* dest_addr doubles as the "coherent buffer allocated" flag. */
	if (likely(t->rx_dma_req.dest_addr))
		dma_free_coherent(t->uport.dev, t->rx_dma_req.size,
			t->rx_dma_req.virt_addr, t->rx_dma_req.dest_addr);
	t->rx_dma_req.dest_addr = 0;
	t->rx_dma_req.virt_addr = NULL;

	t->use_rx_dma = false;
}
/*
 * tegra_init_apb_dma - allocate the shared "apbio" DMA channel and a
 * one-word coherent bounce buffer for APB I/O.
 *
 * Compiled to a no-op (returning 0) when CONFIG_TEGRA_SYSTEM_DMA is not
 * set. Returns 0 on success, -ENODEV if no channel is available, or
 * -ENOMEM if the bounce buffer cannot be allocated (the channel is
 * released again in that case).
 */
static int tegra_init_apb_dma(void)
{
#ifdef CONFIG_TEGRA_SYSTEM_DMA
	tegra_apb_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
		TEGRA_DMA_SHARED, "apbio");
	if (!tegra_apb_dma) {
		pr_err("%s: can not allocate dma channel\n", __func__);
		return -ENODEV;
	}

	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
		&tegra_apb_bb_phys, GFP_KERNEL);
	if (!tegra_apb_bb) {
		pr_err("%s: can not allocate bounce buffer\n", __func__);
		/* Roll back the channel allocation on buffer failure. */
		tegra_dma_free_channel(tegra_apb_dma);
		tegra_apb_dma = NULL;
		return -ENOMEM;
	}
#endif
	return 0;
}
/*
 * tegra_pcm_open - ALSA PCM ->open: allocate per-stream runtime data,
 * configure direction-specific DMA requests, grab a continuous-mode DMA
 * channel, install hardware constraints, and set up a suspend wakelock.
 *
 * Returns 0 on success or a negative errno; every allocation made here is
 * rolled back on the error path.
 */
static int tegra_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct tegra_runtime_data *prtd;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct tegra_pcm_dma_params * dmap;
	int ret = 0;

	prtd = kzalloc(sizeof(struct tegra_runtime_data), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;

	runtime->private_data = prtd;
	prtd->substream = substream;
	spin_lock_init(&prtd->lock);

	/* Only set up DMA when the DAI actually provides DMA data. */
	dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
	if (dmap) {
		/* Two requests are configured per stream direction. */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			setup_dma_tx_request(&prtd->dma_req[0], dmap);
			setup_dma_tx_request(&prtd->dma_req[1], dmap);
		} else {
			setup_dma_rx_request(&prtd->dma_req[0], dmap);
			setup_dma_rx_request(&prtd->dma_req[1], dmap);
		}

		prtd->dma_req[0].dev = prtd;
		prtd->dma_req[1].dev = prtd;

		prtd->dma_chan = tegra_dma_allocate_channel(
					TEGRA_DMA_MODE_CONTINUOUS_SINGLE,
					"pcm");
		if (prtd->dma_chan == NULL) {
			ret = -ENOMEM;
			goto err;
		}
	}

	/* Set HW params now that initialization is complete */
	snd_soc_set_runtime_hwparams(substream, &tegra_pcm_hardware);

	/* Ensure period size is multiple of 8 */
	ret = snd_pcm_hw_constraint_step(runtime, 0,
			SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 0x8);
	if (ret < 0)
		goto err;

	/* Ensure that buffer size is a multiple of period size */
	ret = snd_pcm_hw_constraint_integer(runtime,
			SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		goto err;

#ifdef CONFIG_HAS_WAKELOCK
	/* Per-stream wakelock, named after direction and PCM device. */
	snprintf(prtd->tegra_wake_lock_name,
		sizeof(prtd->tegra_wake_lock_name),
		"tegra-pcm-%s-%d",
		(substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
			"out" : "in",
		substream->pcm->device);
	wake_lock_init(&prtd->tegra_wake_lock, WAKE_LOCK_SUSPEND,
		prtd->tegra_wake_lock_name);
#endif

	return 0;

err:
	if (prtd->dma_chan) {
		tegra_dma_free_channel(prtd->dma_chan);
	}
	kfree(prtd);
	return ret;
}