/*
 * Module init: allocate the global SPUM device state, initialise its
 * lock and crypto request queue, and acquire the Rx/Tx DMA channels.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * Fixes vs. the original:
 *  - kzalloc() result was never checked, so an OOM led to a NULL deref
 *    in spin_lock_init(); now returns -ENOMEM.
 *  - dropped the needless cast of kzalloc() and used sizeof(*ptr).
 *  - error paths leaked spum_dev; now freed (and reset to NULL) on all
 *    failure paths.
 *  - Rx-channel failure returned a bare -1; now returns -EIO like the
 *    Tx path.
 */
static int __init brcm_spum_init(void)
{
	int ret;

	spum_dev = kzalloc(sizeof(*spum_dev), GFP_KERNEL);
	if (!spum_dev)
		return -ENOMEM;

	spin_lock_init(&spum_dev->lock);
	crypto_init_queue(&spum_dev->spum_queue, SPUM_QUEUE_LENGTH);
	spum_dev->flags = 0;

	/* Acquire DMA channels */
	if (dma_request_chan(&spum_dev->rx_dma_chan, "SPUM_OpenA") != 0) {
		pr_err("%s: Rx dma_request_chan failed\n", __func__);
		ret = -EIO;
		goto err_free_dev;
	}

	if (dma_request_chan(&spum_dev->tx_dma_chan, "SPUM_OpenB") != 0) {
		pr_err("%s: Tx dma_request_chan failed\n", __func__);
		ret = -EIO;
		goto err_release_rx;
	}

	pr_info("%s: DMA channel aquired rx %d tx %d\n", __func__,
		spum_dev->rx_dma_chan, spum_dev->tx_dma_chan);
	return 0;

err_release_rx:
	dma_free_chan(spum_dev->rx_dma_chan);
err_free_dev:
	kfree(spum_dev);
	spum_dev = NULL;
	return ret;
}
/*
 * Acquire the TX and RX DMA channels used for MMC data transfers.
 * On any failure the error code from dma_request_chan() is propagated
 * and any channel already obtained is released, leaving the host in a
 * clean state.
 */
static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
{
	struct device *dev = mmc_dev(host->mmc);

	host->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(host->dma_tx)) {
		dev_err(dev, "Failed to get dma_tx channel\n");
		return PTR_ERR(host->dma_tx);
	}

	host->dma_rx = dma_request_chan(dev, "rx");
	if (!IS_ERR(host->dma_rx))
		return 0;

	/* RX failed: undo the TX request before bailing out. */
	dev_err(dev, "Failed to get dma_rx channel\n");
	dma_release_channel(host->dma_tx);
	return PTR_ERR(host->dma_rx);
}
static int stm32_spdifrx_dma_ctrl_register(struct device *dev, struct stm32_spdifrx_data *spdifrx) { int ret; spdifrx->dmab = devm_kzalloc(dev, sizeof(struct snd_dma_buffer), GFP_KERNEL); if (!spdifrx->dmab) return -ENOMEM; spdifrx->dmab->dev.type = SNDRV_DMA_TYPE_DEV_IRAM; spdifrx->dmab->dev.dev = dev; ret = snd_dma_alloc_pages(spdifrx->dmab->dev.type, dev, SPDIFRX_CSR_BUF_LENGTH, spdifrx->dmab); if (ret < 0) { dev_err(dev, "snd_dma_alloc_pages returned error %d\n", ret); return ret; } spdifrx->ctrl_chan = dma_request_chan(dev, "rx-ctrl"); if (!spdifrx->ctrl_chan) { dev_err(dev, "dma_request_slave_channel failed\n"); return -EINVAL; } spdifrx->slave_config.direction = DMA_DEV_TO_MEM; spdifrx->slave_config.src_addr = (dma_addr_t)(spdifrx->phys_addr + STM32_SPDIFRX_CSR); spdifrx->slave_config.dst_addr = spdifrx->dmab->addr; spdifrx->slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; spdifrx->slave_config.src_maxburst = 1; ret = dmaengine_slave_config(spdifrx->ctrl_chan, &spdifrx->slave_config); if (ret < 0) { dev_err(dev, "dmaengine_slave_config returned error %d\n", ret); dma_release_channel(spdifrx->ctrl_chan); spdifrx->ctrl_chan = NULL; } return ret; };
/* Program the RX DMA channel: 1-byte reads from the UART RXD register. */
static int sprd_rx_dma_config(struct uart_port *port, u32 burst)
{
	struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, port);
	struct dma_slave_config cfg = {
		.src_addr = port->mapbase + SPRD_RXD,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst = burst,
	};

	return dmaengine_slave_config(sp->rx_dma.chn, &cfg);
}

/*
 * Push the most recent DMA-received chunk (trans_len bytes starting at
 * rx_buf_tail) into the tty flip buffer and account it in icount.rx.
 */
static void sprd_uart_dma_rx(struct uart_port *port)
{
	struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, port);
	struct tty_port *tty = &port->state->port;

	port->icount.rx += sp->rx_dma.trans_len;
	tty_insert_flip_string(tty, sp->rx_buf_tail, sp->rx_dma.trans_len);
	tty_flip_buffer_push(tty);
}

/*
 * RX DMA interrupt: work out how much new data the engine has written
 * since the last service and hand it to the tty layer.
 *
 * sp->pos tracks the last-consumed bus address inside the circular RX
 * buffer; state.residue reports the engine's current position.
 */
static void sprd_uart_dma_irq(struct uart_port *port)
{
	struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, port);
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(sp->rx_dma.chn, sp->rx_dma.cookie, &state);
	if (status == DMA_ERROR)
		sprd_stop_rx(port);

	/* Nothing new since the last pass. */
	if (!state.residue && sp->pos == sp->rx_dma.phys_addr)
		return;

	if (!state.residue) {
		/* Engine wrapped to the start: consume up to buffer end. */
		sp->rx_dma.trans_len = SPRD_UART_RX_SIZE + sp->rx_dma.phys_addr - sp->pos;
		sp->pos = sp->rx_dma.phys_addr;
	} else {
		/* Consume the span between our position and the engine's. */
		sp->rx_dma.trans_len = state.residue - sp->pos;
		sp->pos = state.residue;
	}

	sprd_uart_dma_rx(port);
	sp->rx_buf_tail += sp->rx_dma.trans_len;
}

/*
 * RX DMA transfer-complete callback: flush whatever remains in the
 * buffer to the tty layer and re-arm the next RX DMA transfer.
 * Runs under the port lock; stops RX on any DMA error.
 */
static void sprd_complete_rx_dma(void *data)
{
	struct uart_port *port = (struct uart_port *)data;
	struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, port);
	struct dma_tx_state state;
	enum dma_status status;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);

	status = dmaengine_tx_status(sp->rx_dma.chn, sp->rx_dma.cookie, &state);
	if (status != DMA_COMPLETE) {
		sprd_stop_rx(port);
		spin_unlock_irqrestore(&port->lock, flags);
		return;
	}

	/* Drain any tail data the IRQ handler has not yet consumed. */
	if (sp->pos != sp->rx_dma.phys_addr) {
		sp->rx_dma.trans_len = SPRD_UART_RX_SIZE + sp->rx_dma.phys_addr - sp->pos;
		sprd_uart_dma_rx(port);
		sp->rx_buf_tail += sp->rx_dma.trans_len;
	}

	/* Re-arm RX DMA; fall back to stopping RX if that fails. */
	if (sprd_start_dma_rx(port))
		sprd_stop_rx(port);

	spin_unlock_irqrestore(&port->lock, flags);
}

/*
 * Arm a fresh RX DMA transfer over the whole RX ring buffer.
 * No-op (returns 0) when RX DMA is disabled for this port.
 */
static int sprd_start_dma_rx(struct uart_port *port)
{
	struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, port);
	int ret;

	if (!sp->rx_dma.enable)
		return 0;

	/* Reset consume position to the start of the DMA buffer. */
	sp->pos = sp->rx_dma.phys_addr;
	sp->rx_buf_tail = sp->rx_dma.virt;
	sprd_rx_full_thld(port, SPRD_RX_FIFO_FULL);
	ret = sprd_rx_dma_config(port, SPRD_RX_DMA_STEP);
	if (ret)
		return ret;

	return sprd_uart_dma_submit(port, &sp->rx_dma, SPRD_UART_RX_SIZE, DMA_DEV_TO_MEM, sprd_complete_rx_dma);
}

/* Disable UART DMA and release any channels that were acquired. */
static void sprd_release_dma(struct uart_port *port)
{
	struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, port);

	sprd_uart_dma_enable(port, false);

	if (sp->rx_dma.enable)
		dma_release_channel(sp->rx_dma.chn);

	if (sp->tx_dma.enable)
		dma_release_channel(sp->tx_dma.chn);

	sp->tx_dma.enable = false;
	sp->rx_dma.enable = false;
}

/*
 * Try to acquire TX and RX DMA channels. Failures are not fatal: the
 * corresponding direction simply falls back to PIO (enable = false).
 */
static void sprd_request_dma(struct uart_port *port)
{
	struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, port);

	sp->tx_dma.enable = true;
	sp->rx_dma.enable = true;

	sp->tx_dma.chn = dma_request_chan(port->dev, "tx");
	if (IS_ERR(sp->tx_dma.chn)) {
		dev_err(port->dev, "request TX DMA channel failed, ret = %ld\n", PTR_ERR(sp->tx_dma.chn));
		sp->tx_dma.enable = false;
	}

	sp->rx_dma.chn = dma_request_chan(port->dev, "rx");
	if (IS_ERR(sp->rx_dma.chn)) {
		dev_err(port->dev, "request RX DMA channel failed, ret = %ld\n", PTR_ERR(sp->rx_dma.chn));
		sp->rx_dma.enable = false;
	}
}

/*
 * Stop transmission: via the DMA path when TX DMA is enabled, otherwise
 * by masking (and clearing) the TX-empty interrupt.
 */
static void sprd_stop_tx(struct uart_port *port)
{
	struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, port);
	unsigned int ien, iclr;

	if (sp->tx_dma.enable) {
		sprd_stop_tx_dma(port);
		return;
	}

	iclr = serial_in(port, SPRD_ICLR);
	ien = serial_in(port, SPRD_IEN);

	iclr |= SPRD_IEN_TX_EMPTY;
	ien &= ~SPRD_IEN_TX_EMPTY;

	serial_out(port, SPRD_IEN, ien);
	serial_out(port, SPRD_ICLR, iclr);
}

/*
 * Start transmission: via DMA when enabled, otherwise by unmasking the
 * TX-empty interrupt so the IRQ handler feeds the FIFO.
 */
static void sprd_start_tx(struct uart_port *port)
{
	struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, port);
	unsigned int ien;

	if (sp->tx_dma.enable) {
		sprd_start_tx_dma(port);
		return;
	}

	ien = serial_in(port, SPRD_IEN);
	if (!(ien & SPRD_IEN_TX_EMPTY)) {
		ien |= SPRD_IEN_TX_EMPTY;
		serial_out(port, SPRD_IEN, ien);
	}
}

/* The Sprd serial does not support this function. */
static void sprd_break_ctl(struct uart_port *port, int break_state)
{
	/* nothing to do */
}

/*
 * Account line-status errors (break/parity/framing/overrun) in icount,
 * let the core handle break, then mask off ignored conditions and map
 * the remaining status to a tty flag.
 *
 * Returns non-zero when uart_handle_break() consumed the character.
 */
static int handle_lsr_errors(struct uart_port *port,
			     unsigned int *flag,
			     unsigned int *lsr)
{
	int ret = 0;

	/* statistics */
	if (*lsr & SPRD_LSR_BI) {
		/* Break overrides parity/framing indications. */
		*lsr &= ~(SPRD_LSR_FE | SPRD_LSR_PE);
		port->icount.brk++;
		ret = uart_handle_break(port);
		if (ret)
			return ret;
	} else if (*lsr & SPRD_LSR_PE)
		port->icount.parity++;
	else if (*lsr & SPRD_LSR_FE)
		port->icount.frame++;
	if (*lsr & SPRD_LSR_OE)
		port->icount.overrun++;

	/* mask off conditions which should be ignored */
	*lsr &= port->read_status_mask;
	if (*lsr & SPRD_LSR_BI)
		*flag = TTY_BREAK;
	else if (*lsr & SPRD_LSR_PE)
		*flag = TTY_PARITY;
	else if (*lsr & SPRD_LSR_FE)
		*flag = TTY_FRAME;

	return ret;
}
static int rockchip_spi_probe(struct platform_device *pdev) { int ret = 0; struct rockchip_spi *rs; struct spi_master *master; struct resource *mem; u32 rsd_nsecs; master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi)); if (!master) return -ENOMEM; platform_set_drvdata(pdev, master); rs = spi_master_get_devdata(master); /* Get basic io resource and map it */ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); rs->regs = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(rs->regs)) { ret = PTR_ERR(rs->regs); goto err_ioremap_resource; } rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk"); if (IS_ERR(rs->apb_pclk)) { dev_err(&pdev->dev, "Failed to get apb_pclk\n"); ret = PTR_ERR(rs->apb_pclk); goto err_ioremap_resource; } rs->spiclk = devm_clk_get(&pdev->dev, "spiclk"); if (IS_ERR(rs->spiclk)) { dev_err(&pdev->dev, "Failed to get spi_pclk\n"); ret = PTR_ERR(rs->spiclk); goto err_ioremap_resource; } ret = clk_prepare_enable(rs->apb_pclk); if (ret) { dev_err(&pdev->dev, "Failed to enable apb_pclk\n"); goto err_ioremap_resource; } ret = clk_prepare_enable(rs->spiclk); if (ret) { dev_err(&pdev->dev, "Failed to enable spi_clk\n"); goto err_spiclk_enable; } spi_enable_chip(rs, 0); rs->type = SSI_MOTO_SPI; rs->master = master; rs->dev = &pdev->dev; rs->max_freq = clk_get_rate(rs->spiclk); if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns", &rsd_nsecs)) rs->rsd_nsecs = rsd_nsecs; rs->fifo_len = get_fifo_len(rs); if (!rs->fifo_len) { dev_err(&pdev->dev, "Failed to get fifo length\n"); ret = -EINVAL; goto err_get_fifo_len; } spin_lock_init(&rs->lock); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); master->auto_runtime_pm = true; master->bus_num = pdev->id; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP; master->num_chipselect = 2; master->dev.of_node = pdev->dev.of_node; master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8); master->set_cs = rockchip_spi_set_cs; master->prepare_message = 
rockchip_spi_prepare_message; master->unprepare_message = rockchip_spi_unprepare_message; master->transfer_one = rockchip_spi_transfer_one; master->handle_err = rockchip_spi_handle_err; rs->dma_tx.ch = dma_request_chan(rs->dev, "tx"); if (IS_ERR(rs->dma_tx.ch)) { /* Check tx to see if we need defer probing driver */ if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto err_get_fifo_len; } dev_warn(rs->dev, "Failed to request TX DMA channel\n"); rs->dma_tx.ch = NULL; } rs->dma_rx.ch = dma_request_chan(rs->dev, "rx"); if (IS_ERR(rs->dma_rx.ch)) { if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) { dma_release_channel(rs->dma_tx.ch); rs->dma_tx.ch = NULL; ret = -EPROBE_DEFER; goto err_get_fifo_len; } dev_warn(rs->dev, "Failed to request RX DMA channel\n"); rs->dma_rx.ch = NULL; } if (rs->dma_tx.ch && rs->dma_rx.ch) { dma_get_slave_caps(rs->dma_rx.ch, &(rs->dma_caps)); rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR); rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR); rs->dma_tx.direction = DMA_MEM_TO_DEV; rs->dma_rx.direction = DMA_DEV_TO_MEM; master->can_dma = rockchip_spi_can_dma; master->dma_tx = rs->dma_tx.ch; master->dma_rx = rs->dma_rx.ch; } ret = devm_spi_register_master(&pdev->dev, master); if (ret) { dev_err(&pdev->dev, "Failed to register master\n"); goto err_register_master; } return 0; err_register_master: pm_runtime_disable(&pdev->dev); if (rs->dma_tx.ch) dma_release_channel(rs->dma_tx.ch); if (rs->dma_rx.ch) dma_release_channel(rs->dma_rx.ch); err_get_fifo_len: clk_disable_unprepare(rs->spiclk); err_spiclk_enable: clk_disable_unprepare(rs->apb_pclk); err_ioremap_resource: spi_master_put(master); return ret; }