/*
 * PCM open callback for the Tegra ASoC platform driver.
 *
 * Allocates the per-stream runtime data, grabs a continuous-mode DMA
 * channel when the CPU DAI exposes DMA parameters, and installs the
 * hardware constraints on the new runtime.
 *
 * Returns 0 on success or a negative errno; on failure the runtime data
 * and any allocated DMA channel are released before returning.
 */
static int tegra_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct tegra_runtime_data *prtd;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct tegra_pcm_dma_params * dmap;
	int ret = 0;

	prtd = kzalloc(sizeof(struct tegra_runtime_data), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;
	runtime->private_data = prtd;
	prtd->substream = substream;
	spin_lock_init(&prtd->lock);

	/* DMA is only used when the CPU DAI provides DMA parameters. */
	dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
	if (dmap) {
		/*
		 * NOTE(review): only .dev is initialized on the two request
		 * descriptors here; the source/destination setup performed
		 * by setup_dma_tx/rx_request() in sibling variants of this
		 * function is absent — presumably it happens later (e.g. in
		 * hw_params) in this version. TODO confirm.
		 */
		prtd->dma_req[0].dev = prtd;
		prtd->dma_req[1].dev = prtd;
		prtd->dma_chan = tegra_dma_allocate_channel(
				TEGRA_DMA_MODE_CONTINUOUS_SINGLE,
				"pcm");
		if (prtd->dma_chan == NULL) {
			ret = -ENOMEM;
			goto err;
		}
	}

	/* Set HW params now that initialization is complete */
	snd_soc_set_runtime_hwparams(substream, &tegra_pcm_hardware);

	/* Ensure period size is multiple of 8 */
	ret = snd_pcm_hw_constraint_step(runtime, 0,
			SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 0x8);
	if (ret < 0)
		goto err;

	/* Ensure that buffer size is a multiple of period size */
	ret = snd_pcm_hw_constraint_integer(runtime,
			SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		goto err;

	return 0;

err:
	if (prtd->dma_chan) {
		tegra_dma_free_channel(prtd->dma_chan);
	}
	kfree(prtd);
	return ret;
}
static int tegra_pcm_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct tegra_runtime_data *prtd; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct tegra_pcm_dma_params * dmap; int ret = 0; prtd = kzalloc(sizeof(struct tegra_runtime_data), GFP_KERNEL); if (prtd == NULL) return -ENOMEM; runtime->private_data = prtd; prtd->substream = substream; spin_lock_init(&prtd->lock); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); setup_dma_tx_request(&prtd->dma_req[0], dmap); setup_dma_tx_request(&prtd->dma_req[1], dmap); } else { dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); setup_dma_rx_request(&prtd->dma_req[0], dmap); setup_dma_rx_request(&prtd->dma_req[1], dmap); } prtd->dma_req[0].dev = prtd; prtd->dma_req[1].dev = prtd; prtd->dma_chan = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT); if (prtd->dma_chan == NULL) { ret = -ENOMEM; goto err; } /* */ snd_soc_set_runtime_hwparams(substream, &tegra_pcm_hardware); /* */ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) goto err; return 0; err: if (prtd->dma_chan) { tegra_dma_free_channel(prtd->dma_chan); } kfree(prtd); return ret; }
/*
 * Allocate the continuous-mode DMA channel used for UART RX on this port.
 * Returns 0 on success, -ENODEV when no channel could be allocated.
 */
static int tegra_uart_init_rx_dma(struct tegra_uart_port *t)
{
	t->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_CONTINUOUS,
					       "uart_rx_%d", t->uport.line);
	if (t->rx_dma)
		return 0;

	dev_err(t->uport.dev, "%s: failed to allocate RX DMA.\n", __func__);
	return -ENODEV;
}
/*
 * Set up the shared APB-I/O DMA channel and its one-word bounce buffer.
 * Compiled out entirely (and trivially succeeds) when
 * CONFIG_TEGRA_SYSTEM_DMA is not enabled.
 *
 * Returns 0 on success, -ENODEV or -ENOMEM on allocation failure; on the
 * bounce-buffer failure path the DMA channel is released again.
 */
static int tegra_init_apb_dma(void)
{
#ifdef CONFIG_TEGRA_SYSTEM_DMA
	tegra_apb_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
						   TEGRA_DMA_SHARED,
						   "apbio");
	if (tegra_apb_dma == NULL) {
		pr_err("%s: can not allocate dma channel\n", __func__);
		return -ENODEV;
	}

	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
					  &tegra_apb_bb_phys, GFP_KERNEL);
	if (tegra_apb_bb == NULL) {
		pr_err("%s: can not allocate bounce buffer\n", __func__);
		tegra_dma_free_channel(tegra_apb_dma);
		tegra_apb_dma = NULL;
		return -ENOMEM;
	}
#endif
	return 0;
}
/*
 * Set up RX DMA for this UART port: allocate a continuous-mode DMA
 * channel, allocate a coherent receive buffer, and fill in the RX DMA
 * request descriptor (UART FIFO -> memory).
 *
 * Returns 0 on success, -ENODEV on any failure; on failure previously
 * acquired RX DMA resources are released via tegra_uart_free_rx_dma().
 */
static int tegra_uart_init_rx_dma(struct tegra_uart_port *t)
{
	dma_addr_t rx_dma_phys;
	void *rx_dma_virt;

	t->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_CONTINUOUS,
			"uart_rx_%d", t->uport.line);
	if (!t->rx_dma) {
		dev_err(t->uport.dev, "%s: failed to allocate RX DMA.\n",
				__func__);
		return -ENODEV;
	}

	t->rx_dma_req.size = UART_RX_DMA_BUFFER_SIZE;

	rx_dma_virt = dma_alloc_coherent(t->uport.dev,
			t->rx_dma_req.size, &rx_dma_phys, GFP_KERNEL);
	if (!rx_dma_virt) {
		dev_err(t->uport.dev, "DMA buffers allocate failed\n");
		/*
		 * NOTE(review): the fail path runs before dest_addr/virt_addr
		 * are recorded below — assumes tegra_uart_free_rx_dma()
		 * tolerates a descriptor with those fields still unset.
		 * TODO confirm.
		 */
		goto fail;
	}
	t->rx_dma_req.dest_addr = rx_dma_phys;
	t->rx_dma_req.virt_addr = rx_dma_virt;

	/* Device -> memory: source is the fixed UART register window. */
	t->rx_dma_req.source_addr = (unsigned long)t->uport.mapbase;
	t->rx_dma_req.source_wrap = 4;
	t->rx_dma_req.dest_wrap = 0;
	t->rx_dma_req.to_memory = 1;
	/*
	 * NOTE(review): bus widths 8 (source) / 32 (dest) — presumably bits,
	 * matching byte-wide FIFO reads and word-wide memory writes; verify
	 * against the DMA driver's definition of these fields.
	 */
	t->rx_dma_req.source_bus_width = 8;
	t->rx_dma_req.dest_bus_width = 32;
	t->rx_dma_req.req_sel = dma_req_sel[t->uport.line];
	t->rx_dma_req.complete = tegra_rx_dma_complete_callback;
	t->rx_dma_req.threshold = tegra_rx_dma_threshold_callback;
	t->rx_dma_req.dev = t;

	return 0;

fail:
	tegra_uart_free_rx_dma(t);
	return -ENODEV;
}
static int tegra_startup(struct uart_port *u) { struct tegra_uart_port *t = container_of(u, struct tegra_uart_port, uport); int ret = 0; struct tegra_uart_platform_data *pdata; t = container_of(u, struct tegra_uart_port, uport); sprintf(t->port_name, "tegra_uart_%d", u->line); t->use_tx_dma = false; if (!TX_FORCE_PIO) { t->tx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT, "uart_tx_%d", u->line); if (t->tx_dma) t->use_tx_dma = true; else pr_err("%s: failed to allocate TX DMA.\n", __func__); } if (t->use_tx_dma) { t->tx_dma_req.instance = u->line; t->tx_dma_req.complete = tegra_tx_dma_complete_callback; t->tx_dma_req.to_memory = 0; t->tx_dma_req.dest_addr = (unsigned long)t->uport.mapbase; t->tx_dma_req.dest_wrap = 4; t->tx_dma_req.source_wrap = 0; t->tx_dma_req.source_bus_width = 32; t->tx_dma_req.dest_bus_width = 8; t->tx_dma_req.req_sel = dma_req_sel[t->uport.line]; t->tx_dma_req.dev = t; t->tx_dma_req.size = 0; t->xmit_dma_addr = dma_map_single(t->uport.dev, t->uport.state->xmit.buf, UART_XMIT_SIZE, DMA_TO_DEVICE); } t->tx_in_progress = 0; t->use_rx_dma = false; if (!RX_FORCE_PIO && t->rx_dma_req.virt_addr) { if (!tegra_uart_init_rx_dma(t)) t->use_rx_dma = true; } ret = tegra_uart_hw_init(t); if (ret) goto fail; if (t->is_irda && t->irda_start) t->irda_start(); pdata = u->dev->platform_data; if (pdata && pdata->is_loopback) t->mcr_shadow |= UART_MCR_LOOP; dev_dbg(u->dev, "Requesting IRQ %d\n", u->irq); ret = request_irq(u->irq, tegra_uart_isr, IRQF_DISABLED | IRQF_TRIGGER_HIGH, t->port_name, t); if (ret) { dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq); goto fail; } dev_dbg(u->dev, "Started UART port %d\n", u->line); return 0; fail: dev_err(u->dev, "Tegra UART startup failed\n"); return ret; }
static int tegra_pcm_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct tegra_runtime_data *prtd; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct tegra_pcm_dma_params * dmap; int ret = 0; prtd = kzalloc(sizeof(struct tegra_runtime_data), GFP_KERNEL); if (prtd == NULL) return -ENOMEM; runtime->private_data = prtd; prtd->substream = substream; spin_lock_init(&prtd->lock); dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); if (dmap) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { setup_dma_tx_request(&prtd->dma_req[0], dmap); setup_dma_tx_request(&prtd->dma_req[1], dmap); } else { setup_dma_rx_request(&prtd->dma_req[0], dmap); setup_dma_rx_request(&prtd->dma_req[1], dmap); } prtd->dma_req[0].dev = prtd; prtd->dma_req[1].dev = prtd; prtd->dma_chan = tegra_dma_allocate_channel( TEGRA_DMA_MODE_CONTINUOUS_SINGLE, "pcm"); if (prtd->dma_chan == NULL) { ret = -ENOMEM; goto err; } } /* Set HW params now that initialization is complete */ snd_soc_set_runtime_hwparams(substream, &tegra_pcm_hardware); /* Ensure period size is multiple of 8 */ ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 0x8); if (ret < 0) goto err; /* Ensure that buffer size is a multiple of period size */ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) goto err; #ifdef CONFIG_HAS_WAKELOCK snprintf(prtd->tegra_wake_lock_name, sizeof(prtd->tegra_wake_lock_name), "tegra-pcm-%s-%d", (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? "out" : "in", substream->pcm->device); wake_lock_init(&prtd->tegra_wake_lock, WAKE_LOCK_SUSPEND, prtd->tegra_wake_lock_name); #endif return 0; err: if (prtd->dma_chan) { tegra_dma_free_channel(prtd->dma_chan); } kfree(prtd); return ret; }