Example #1
static void s3c24xx_serial_rx_dma_complete(void *args)
{
	struct s3c24xx_uart_port *ourport = args;
	struct uart_port *port = &ourport->port;

	struct s3c24xx_uart_dma *dma = ourport->dma;
	struct tty_port *t = &port->state->port;
	struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port);

	struct dma_tx_state state;
	unsigned long flags;
	int received;

	dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state);
	received = dma->rx_bytes_requested - state.residue;
	async_tx_ack(dma->rx_desc);

	spin_lock_irqsave(&port->lock, flags);

	if (received)
		s3c24xx_uart_copy_rx_to_tty(ourport, t, received);

	if (tty) {
		tty_flip_buffer_push(t);
		tty_kref_put(tty);
	}

	s3c64xx_start_rx_dma(ourport);

	spin_unlock_irqrestore(&port->lock, flags);
}
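Nearly every callback in this collection repeats the same arithmetic: dmaengine_tx_status() fills a struct dma_tx_state whose residue field holds the bytes not yet transferred, so the completed count is the requested length minus the residue. A minimal, self-contained sketch of the pattern; chan, cookie and bytes_requested are hypothetical stand-ins for the driver-specific fields used above:

#include <linux/dmaengine.h>

static unsigned int bytes_transferred(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      unsigned int bytes_requested)
{
	struct dma_tx_state state;

	/* Fills state.residue with the number of bytes still outstanding. */
	dmaengine_tx_status(chan, cookie, &state);

	/* Completed bytes = requested - residue. */
	return bytes_requested - state.residue;
}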
Example #2
static int mpodp_tx_is_done(struct mpodp_if_priv *priv, struct mpodp_txq *txq,
			    int index)
{
	return (dmaengine_tx_status
		(priv->tx_chan[txq->ring[index].chanidx],
		 txq->ring[index].cookie, NULL) == DMA_SUCCESS);
}
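When only the status matters, the struct dma_tx_state argument may be NULL, as here; the residue bookkeeping is then skipped entirely (Examples #12 and #16 use the same shortcut). Note that the mpodp snippets compare against DMA_SUCCESS, the pre-3.13 name of what mainline kernels now call DMA_COMPLETE.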
Example #3
static void mtk8250_dma_rx_complete(void *param)
{
	struct uart_8250_port *up = param;
	struct uart_8250_dma *dma = up->dma;
	struct mtk8250_data *data = up->port.private_data;
	struct tty_port *tty_port = &up->port.state->port;
	struct dma_tx_state state;
	unsigned char *ptr;
	int copied;

	dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
				dma->rx_size, DMA_FROM_DEVICE);

	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	if (data->rx_status == DMA_RX_SHUTDOWN)
		return;

	if ((data->rx_pos + state.residue) <= dma->rx_size) {
		ptr = (unsigned char *)(data->rx_pos + dma->rx_buf);
		copied = tty_insert_flip_string(tty_port, ptr, state.residue);
	} else {
		ptr = (unsigned char *)(data->rx_pos + dma->rx_buf);
		copied = tty_insert_flip_string(tty_port, ptr,
						dma->rx_size - data->rx_pos);
		ptr = (unsigned char *)(dma->rx_buf);
		copied += tty_insert_flip_string(tty_port, ptr,
				data->rx_pos + state.residue - dma->rx_size);
	}
	up->port.icount.rx += copied;

	tty_flip_buffer_push(tty_port);

	mtk8250_rx_dma(up);
}
Example #4
static void s3c24xx_serial_tx_dma_complete(void *args)
{
	struct s3c24xx_uart_port *ourport = args;
	struct uart_port *port = &ourport->port;
	struct circ_buf *xmit = &port->state->xmit;
	struct s3c24xx_uart_dma *dma = ourport->dma;
	struct dma_tx_state state;
	unsigned long flags;
	int count;

	dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
	count = dma->tx_bytes_requested - state.residue;
	async_tx_ack(dma->tx_desc);

	dma_sync_single_for_cpu(ourport->port.dev, dma->tx_transfer_addr,
				dma->tx_size, DMA_TO_DEVICE);

	spin_lock_irqsave(&port->lock, flags);

	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	port->icount.tx += count;
	ourport->tx_in_progress = 0;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	s3c24xx_serial_start_next_tx(ourport);
	spin_unlock_irqrestore(&port->lock, flags);
}
Example #5
static void s3c24xx_serial_stop_rx(struct uart_port *port)
{
	struct s3c24xx_uart_port *ourport = to_ourport(port);
	struct s3c24xx_uart_dma *dma = ourport->dma;
	struct tty_port *t = &port->state->port;
	struct dma_tx_state state;
	enum dma_status dma_status;
	unsigned int received;

	if (rx_enabled(port)) {
		dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
		if (s3c24xx_serial_has_interrupt_mask(port))
			s3c24xx_set_bit(port, S3C64XX_UINTM_RXD,
					S3C64XX_UINTM);
		else
			disable_irq_nosync(ourport->rx_irq);
		rx_enabled(port) = 0;
	}
	if (dma && dma->rx_chan) {
		dmaengine_pause(dma->rx_chan);
		dma_status = dmaengine_tx_status(dma->rx_chan,
				dma->rx_cookie, &state);
		if (dma_status == DMA_IN_PROGRESS ||
			dma_status == DMA_PAUSED) {
			received = dma->rx_bytes_requested - state.residue;
			dmaengine_terminate_all(dma->rx_chan);
			s3c24xx_uart_copy_rx_to_tty(ourport, t, received);
		}
	}
}
Example #6
static void mtk8250_rx_dma(struct uart_8250_port *up)
{
	struct uart_8250_dma *dma = up->dma;
	struct mtk8250_data *data = up->port.private_data;
	struct dma_async_tx_descriptor	*desc;
	struct dma_tx_state	 state;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		pr_err("failed to prepare rx slave single\n");
		return;
	}

	desc->callback = mtk8250_dma_rx_complete;
	desc->callback_param = up;

	dma->rx_cookie = dmaengine_submit(desc);

	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	data->rx_pos = state.residue;

	dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
				   dma->rx_size, DMA_FROM_DEVICE);

	dma_async_issue_pending(dma->rxchan);
}
Example #7
static void __dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct omap8250_priv *priv = p->port.private_data;
	struct uart_8250_dma *dma = p->dma;
	struct dma_tx_state     state;
	unsigned long flags;

	spin_lock_irqsave(&p->port.lock, flags);

	/*
	 * If the tx status is not DMA_COMPLETE, then this is a delayed
	 * completion callback. A previous RX timeout flush would have
	 * already pushed the data, so exit.
	 */
	if (dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state) !=
			DMA_COMPLETE) {
		spin_unlock_irqrestore(&p->port.lock, flags);
		return;
	}
	__dma_rx_do_complete(p);
	if (!priv->throttled)
		omap_8250_rx_dma(p);

	spin_unlock_irqrestore(&p->port.lock, flags);
}
Example #8
static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct omap8250_priv	*priv = p->port.private_data;
	struct uart_8250_dma	*dma = p->dma;
	struct dma_tx_state     state;
	unsigned long		flags;
	int ret;

	spin_lock_irqsave(&priv->rx_dma_lock, flags);

	if (!dma->rx_running) {
		spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
		return;
	}

	ret = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	if (ret == DMA_IN_PROGRESS) {
		ret = dmaengine_pause(dma->rxchan);
		if (WARN_ON_ONCE(ret))
			priv->rx_dma_broken = true;
	}
	spin_unlock_irqrestore(&priv->rx_dma_lock, flags);

	__dma_rx_do_complete(p);
	dmaengine_terminate_all(dma->rxchan);
}
Example #9
static void __dma_rx_do_complete(struct uart_8250_port *p)
{
	struct omap8250_priv	*priv = p->port.private_data;
	struct uart_8250_dma    *dma = p->dma;
	struct tty_port         *tty_port = &p->port.state->port;
	struct dma_tx_state     state;
	int                     count;
	unsigned long		flags;
	int			ret;

	spin_lock_irqsave(&priv->rx_dma_lock, flags);

	if (!dma->rx_running)
		goto unlock;

	dma->rx_running = 0;
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	count = dma->rx_size - state.residue;

	ret = tty_insert_flip_string(tty_port, dma->rx_buf, count);

	p->port.icount.rx += ret;
	p->port.icount.buf_overrun += count - ret;
unlock:
	spin_unlock_irqrestore(&priv->rx_dma_lock, flags);

	tty_flip_buffer_push(tty_port);
}
Example #10
static void tegra_uart_rx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct uart_port *u = &tup->uport;
	unsigned long flags;
	struct dma_tx_state state;
	enum dma_status status;

	spin_lock_irqsave(&u->lock, flags);

	status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);

	if (status == DMA_IN_PROGRESS) {
		dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
		goto done;
	}

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	tegra_uart_rx_buffer_push(tup, 0);
	tegra_uart_start_rx_dma(tup);

	/* Activate flow control to start transfer */
	if (tup->rts_active)
		set_rts(tup, true);

done:
	spin_unlock_irqrestore(&u->lock, flags);
}
Example #11
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup,
		unsigned long *flags)
{
	struct dma_tx_state state;
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &tup->uport.state->port;
	struct uart_port *u = &tup->uport;
	int count;

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	async_tx_ack(tup->rx_dma_desc);
	count = tup->rx_bytes_requested - state.residue;

	/* If we are here, DMA is stopped */
	if (count)
		tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		spin_unlock_irqrestore(&u->lock, *flags);
		tty_flip_buffer_push(port);
		spin_lock_irqsave(&u->lock, *flags);
		tty_kref_put(tty);
	}
	tegra_uart_start_rx_dma(tup);

	if (tup->rts_active)
		set_rts(tup, true);
}
Example #12
/*
 * DMA interrupt handler.
 */
static void pxa_ata_dma_irq(void *d)
{
	struct pata_pxa_data *pd = d;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
	if (status == DMA_ERROR || status == DMA_COMPLETE)
		complete(&pd->dma_done);
}
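For context, a callback like pxa_ata_dma_irq() only runs because the driver attached it to a descriptor when the transfer was prepared. A hedged sketch of that registration step, following the same dmaengine flow shown in Examples #6 and #20; the sg/sg_len arguments are hypothetical placeholders, not the actual pata_pxa setup code:

	struct dma_async_tx_descriptor *desc;

	/* Prepare a slave transfer (parameters are placeholders). */
	desc = dmaengine_prep_slave_sg(pd->dma_chan, sg, sg_len,
				       DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return;

	/* pxa_ata_dma_irq() fires when this descriptor completes. */
	desc->callback = pxa_ata_dma_irq;
	desc->callback_param = pd;

	pd->dma_cookie = dmaengine_submit(desc);
	dma_async_issue_pending(pd->dma_chan);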
Example #13
static void sirfsoc_rx_tmo_process_tl(unsigned long param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
	unsigned int count;
	unsigned long flags;
	struct dma_tx_state tx_state;

	spin_lock_irqsave(&port->lock, flags);
	while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
		sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
					SIRFSOC_RX_DMA_BUF_SIZE);
		sirfport->rx_completed++;
		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
	}
	count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
		sirfport->rx_dma_items[sirfport->rx_issued].xmit.tail,
		SIRFSOC_RX_DMA_BUF_SIZE);
	if (count > 0)
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport, count);
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
			SIRFUART_IO_MODE);
	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
	if (sirfport->rx_io_count == 4) {
		sirfport->rx_io_count = 0;
		wr_regl(port, ureg->sirfsoc_int_st_reg,
				uint_st->sirfsoc_rx_done);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~(uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, SIRFUART_INT_EN_CLR,
					uint_en->sirfsoc_rx_done_en);
		sirfsoc_uart_start_next_rx_dma(port);
	} else {
		wr_regl(port, ureg->sirfsoc_int_st_reg,
				uint_st->sirfsoc_rx_done);
		if (!sirfport->is_atlas7)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) |
				(uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_rx_done_en);
	}
	spin_unlock_irqrestore(&port->lock, flags);
	tty_flip_buffer_push(&port->state->port);
}
Example #14
static int mpodp_rx_is_done(struct mpodp_if_priv *priv, struct mpodp_rxq *rxq,
			    int index)
{
	if (rxq->ring[index].len == 0) {
		/* error packet, always done */
		return 1;
	}

	return (dmaengine_tx_status(priv->rx_chan, rxq->ring[index].cookie,
				    NULL) == DMA_SUCCESS);
}
Example #15
static void tegra_uart_rx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct uart_port *u = &tup->uport;
	int count = tup->rx_bytes_requested;
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &u->state->port;
	unsigned long flags;
	int rx_level = 0;
	struct dma_tx_state state;
	enum dma_status status;

	spin_lock_irqsave(&u->lock, flags);
	async_tx_ack(tup->rx_dma_desc);

	status = dmaengine_tx_status(tup->rx_dma_chan,
			tup->rx_cookie, &state);
	if (status == DMA_IN_PROGRESS) {
		dev_info(tup->uport.dev, "RX DMA is in progress\n");
		goto done;
	}

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	/* If we are here, DMA is stopped */
	if (count)
		tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);

	if (tup->enable_rx_buffer_throttle) {
		rx_level = tty_buffer_get_level(port);
		if (rx_level > 70)
			mod_timer(&tup->timer,
					jiffies + tup->timer_timeout_jiffies);
	}

	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
	tegra_uart_start_rx_dma(tup);

	/* Activate flow control to start transfer */
	if (tup->enable_rx_buffer_throttle) {
		if ((rx_level <= 70) && tup->rts_active)
			set_rts(tup, true);
	} else if (tup->rts_active)
		set_rts(tup, true);
done:
	spin_unlock_irqrestore(&u->lock, flags);
}
Example #16
/*
 * Wait until the DMA transfer completes, then stop the DMA controller.
 */
static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
	if (status != DMA_ERROR && status != DMA_COMPLETE &&
	    !wait_for_completion_timeout(&pd->dma_done, HZ))
		ata_dev_err(qc->dev, "Timeout waiting for DMA completion!");

	dmaengine_terminate_all(pd->dma_chan);
}
Example #17
/*
 * Read DMA status. The bmdma_stop() will take care of properly finishing the
 * DMA transfer so we always have DMA-complete interrupt here.
 */
static unsigned char pxa_bmdma_status(struct ata_port *ap)
{
	struct pata_pxa_data *pd = ap->private_data;
	unsigned char ret = ATA_DMA_INTR;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, &state);
	if (status != DMA_COMPLETE)
		ret |= ATA_DMA_ERR;

	return ret;
}
Example #18
static int serial_pxa_suspend(struct device *dev)
{
	struct uart_pxa_port *sport = dev_get_drvdata(dev);
	struct uart_pxa_dma *pxa_dma = &sport->uart_dma;
	struct dma_tx_state dma_state;

	if (sport && (sport->ier & UART_IER_DMAE)) {
		int sent = 0;
		unsigned long flags;

		local_irq_save(flags);
		/*
		 * TX is stopped across suspend; on resume, the TX
		 * startup path runs and sets tx_stop back to 0.
		 */
		pxa_dma->tx_stop = 1;
		pxa_dma->rx_stop = 1;
		pxa_dma->tx_saved_len = 0;
		if (dma_async_is_tx_complete(pxa_dma->txdma_chan,
					     pxa_dma->tx_cookie, NULL, NULL)
			!= DMA_SUCCESS) {
			dmaengine_pause(pxa_dma->txdma_chan);
			dmaengine_tx_status(pxa_dma->txdma_chan,
					    pxa_dma->tx_cookie, &dma_state);
			sent = pxa_dma->tx_size - dma_state.residue;
			pxa_dma->tx_saved_len = dma_state.residue;
			memcpy(pxa_dma->tx_buf_save, pxa_dma->txdma_addr + sent,
			       dma_state.residue);
			stop_dma(sport, PXA_UART_TX);
		}
		if (dma_async_is_tx_complete(pxa_dma->rxdma_chan,
					     pxa_dma->rx_cookie, NULL, NULL)
			!= DMA_SUCCESS) {
			dmaengine_pause(pxa_dma->rxdma_chan);
			pxa_uart_receive_dma_cb(sport);
			stop_dma(sport, PXA_UART_RX);
		}
		local_irq_restore(flags);
	}

	if (sport)
		uart_suspend_port(&serial_pxa_reg, &sport->port);

	/* Remove uart rx constraint which will block system entering D1p. */
	if (del_timer_sync(&sport->pxa_timer))
		pxa_timer_handler((unsigned long)sport);

	return 0;
}
Example #19
static void tegra_uart_stop_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	int count;

	dmaengine_terminate_all(tup->tx_dma_chan);
	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
}
Example #20
int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
{
	struct uart_8250_dma		*dma = p->dma;
	struct dma_async_tx_descriptor	*desc;
	struct dma_tx_state		state;
	int				dma_status;

	dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	switch (iir & 0x3f) {
	case UART_IIR_RLSI:
		/* 8250_core handles errors and break interrupts */
		return -EIO;
	case UART_IIR_RX_TIMEOUT:
		/*
		 * If RCVR FIFO trigger level was not reached, complete the
		 * transfer and let 8250_core copy the remaining data.
		 */
		if (dma_status == DMA_IN_PROGRESS) {
			dmaengine_pause(dma->rxchan);
			__dma_rx_complete(p);
		}
		return -ETIMEDOUT;
	default:
		break;
	}

	if (dma_status)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	desc->callback = __dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
				   dma->rx_size, DMA_FROM_DEVICE);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}
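The bare if (dma_status) test above works because DMA_COMPLETE is the first enumerator of enum dma_status and therefore 0: any nonzero status means the previous RX transfer is still in flight (or failed), so the function bails out before submitting a new descriptor.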
Example #21
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
{
	struct dma_tx_state state;

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	tegra_uart_rx_buffer_push(tup, state.residue);
	tegra_uart_start_rx_dma(tup);

	if (tup->rts_active)
		set_rts(tup, true);
}
Example #22
static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
{
	unsigned int utrstat, ufstat, received;
	struct s3c24xx_uart_port *ourport = dev_id;
	struct uart_port *port = &ourport->port;
	struct s3c24xx_uart_dma *dma = ourport->dma;
	struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port);
	struct tty_port *t = &port->state->port;
	unsigned long flags;
	struct dma_tx_state state;

	utrstat = rd_regl(port, S3C2410_UTRSTAT);
	ufstat = rd_regl(port, S3C2410_UFSTAT);

	spin_lock_irqsave(&port->lock, flags);

	if (!(utrstat & S3C2410_UTRSTAT_TIMEOUT)) {
		s3c64xx_start_rx_dma(ourport);
		if (ourport->rx_mode == S3C24XX_RX_PIO)
			enable_rx_dma(ourport);
		goto finish;
	}

	if (ourport->rx_mode == S3C24XX_RX_DMA) {
		dmaengine_pause(dma->rx_chan);
		dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state);
		dmaengine_terminate_all(dma->rx_chan);
		received = dma->rx_bytes_requested - state.residue;
		s3c24xx_uart_copy_rx_to_tty(ourport, t, received);

		enable_rx_pio(ourport);
	}

	s3c24xx_serial_rx_drain_fifo(ourport);

	if (tty) {
		tty_flip_buffer_push(t);
		tty_kref_put(tty);
	}

	wr_regl(port, S3C2410_UTRSTAT, S3C2410_UTRSTAT_TIMEOUT);

finish:
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}
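Comparing Example #22 with Example #21 is instructive: the S3C handler pauses the channel, reads the residue via dmaengine_tx_status(), and only then terminates, while the Tegra path terminates first and reads the state afterwards. Pausing before the residue read is the more defensive ordering, since a dmaengine driver is not obliged to report a meaningful residue for a descriptor that has already been torn down.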
Example #23
/**
 * snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation
 * @substream: PCM substream
 *
 * This function can be used as the PCM pointer callback for dmaengine based PCM
 * driver implementations.
 */
snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
	struct dma_tx_state state;
	enum dma_status status;
	unsigned int buf_size;
	unsigned int pos = 0;

	status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
		buf_size = snd_pcm_lib_buffer_bytes(substream);
		if (state.residue > 0 && state.residue <= buf_size)
			pos = buf_size - state.residue;
	}

	return bytes_to_frames(substream->runtime, pos);
}
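As the kernel-doc above suggests, the function is meant to be dropped straight into a PCM driver's ops table. A minimal sketch of such a hookup; the remaining callbacks are elided because they are driver-specific:

#include <sound/pcm.h>
#include <sound/dmaengine_pcm.h>

static const struct snd_pcm_ops my_pcm_ops = {
	/* ...open/hw_params/prepare callbacks elided... */
	.trigger	= snd_dmaengine_pcm_trigger,
	/* Position is derived from the DMA residue, as shown above. */
	.pointer	= snd_dmaengine_pcm_pointer,
};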
Example #24
static void pxa_uart_receive_dma_cb(void *data)
{
	unsigned long flags;
	struct uart_pxa_port *up = (struct uart_pxa_port *)data;
	struct uart_pxa_dma *pxa_dma = &up->uart_dma;
	struct tty_port *port = &up->port.state->port;
	unsigned int count;
	unsigned char *tmp = pxa_dma->rxdma_addr;
	struct dma_tx_state dma_state;

	if (!mod_timer(&up->pxa_timer, jiffies + PXA_TIMER_TIMEOUT))
		pm_qos_update_request(&up->qos_idle[PXA_UART_RX],
				      up->lpm_qos);

	dmaengine_tx_status(pxa_dma->rxdma_chan, pxa_dma->rx_cookie,
			    &dma_state);
	count = DMA_BLOCK - dma_state.residue;
	if (up->port.sysrq) {
		while (count > 0) {
			if (!uart_handle_sysrq_char(&up->port, *tmp)) {
				tty_insert_flip_char(port, *tmp, TTY_NORMAL);
				up->port.icount.rx++;
			}
			tmp++;
			count--;
		}
	} else {
		tty_insert_flip_string(port, tmp, count);
		up->port.icount.rx += count;
	}
	tty_flip_buffer_push(port);

	spin_lock_irqsave(&up->port.lock, flags);
	/*
	 * DMA_RUNNING flag should be clear only after
	 * all dma interface operation completed
	 */
	pxa_dma->dma_status &= ~RX_DMA_RUNNING;
	spin_unlock_irqrestore(&up->port.lock, flags);

	if (pxa_dma->rx_stop)
		return;
	pxa_uart_receive_dma_start(up);
	return;
}
Example #25
static void __dma_rx_complete(void *param)
{
	struct uart_8250_port	*p = param;
	struct uart_8250_dma	*dma = p->dma;
	struct tty_port		*tty_port = &p->port.state->port;
	struct dma_tx_state	state;
	int			count;

	dma->rx_running = 0;
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;

	tty_flip_buffer_push(tty_port);
}
Example #26
static void __dma_rx_complete(void *param)
{
	struct uart_8250_port	*p = param;
	struct uart_8250_dma	*dma = p->dma;
	struct tty_struct	*tty = p->port.state->port.tty;
	struct dma_tx_state	state;

	dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
				dma->rx_size, DMA_FROM_DEVICE);

	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	dmaengine_terminate_all(dma->rxchan);

	tty_insert_flip_string(tty, dma->rx_buf, dma->rx_size - state.residue);
	p->port.icount.rx += dma->rx_size - state.residue;

	tty_flip_buffer_push(tty);
}
Example #27
static void tegra_uart_tx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned long flags;
	unsigned int count;

	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	spin_lock_irqsave(&tup->uport.lock, flags);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
	spin_unlock_irqrestore(&tup->uport.lock, flags);
}
Example #28
static void tegra_uart_stop_rx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct tty_struct *tty;
	struct tty_port *port = &u->state->port;
	struct dma_tx_state state;
	unsigned long ier;
	int count;

	if (tup->rts_active)
		set_rts(tup, false);

	if (!tup->rx_in_progress)
		return;

	tty = tty_port_tty_get(&tup->uport.state->port);

	tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */

	ier = tup->ier_shadow;
	ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
					TEGRA_UART_IER_EORD);
	tup->ier_shadow = ier;
	tegra_uart_write(tup, ier, UART_IER);
	tup->rx_in_progress = 0;
	if (tup->rx_dma_chan && !tup->use_rx_pio) {
		dmaengine_terminate_all(tup->rx_dma_chan);
		dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
		async_tx_ack(tup->rx_dma_desc);
		count = tup->rx_bytes_requested - state.residue;
		if (count)
			tegra_uart_copy_rx_to_tty(tup, port, count);
		tegra_uart_handle_rx_pio(tup, port);
	} else {
		tegra_uart_handle_rx_pio(tup, port);
	}
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
}
Example #29
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
{
	struct dma_tx_state state;
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &tup->uport.state->port;
	int count;
	int rx_level = 0;

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	async_tx_ack(tup->rx_dma_desc);
	count = tup->rx_bytes_requested - state.residue;

	/* If we are here, DMA is stopped */
	if (count)
		tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);

	if (tup->enable_rx_buffer_throttle) {
		rx_level = tty_buffer_get_level(port);
		if (rx_level > 70)
			mod_timer(&tup->timer,
					jiffies + tup->timer_timeout_jiffies);
	}

	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
	tegra_uart_start_rx_dma(tup);

	if (tup->enable_rx_buffer_throttle) {
		if ((rx_level <= 70) && tup->rts_active)
			set_rts(tup, true);
	} else if (tup->rts_active)
		set_rts(tup, true);
}
Example #30
static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct dma_tx_state tx_state;
	dmaengine_tx_status(sirfport->rx_dma_chan,
		sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
	dmaengine_terminate_all(sirfport->rx_dma_chan);
	sirfport->rx_dma_items[sirfport->rx_issued].xmit.head =
		SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
	if (!sirfport->is_atlas7)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			rd_regl(port, ureg->sirfsoc_int_en_reg) &
			~(uint_en->sirfsoc_rx_timeout_en));
	else
		wr_regl(port, SIRFUART_INT_EN_CLR,
				uint_en->sirfsoc_rx_timeout_en);
	tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
}