static int serial_pxa_suspend(struct device *dev)
{
	struct uart_pxa_port *sport = dev_get_drvdata(dev);
	struct uart_pxa_dma *pxa_dma;
	struct dma_tx_state dma_state;

	if (!sport)
		return 0;

	pxa_dma = &sport->uart_dma;
	if (sport->ier & UART_IER_DMAE) {
		int sent = 0;
		unsigned long flags;

		local_irq_save(flags);
		/*
		 * Stop TX for suspend; on resume the TX startup path
		 * will reset tx_stop to 0.
		 */
		pxa_dma->tx_stop = 1;
		pxa_dma->rx_stop = 1;
		pxa_dma->tx_saved_len = 0;
		if (dma_async_is_tx_complete(pxa_dma->txdma_chan,
					     pxa_dma->tx_cookie, NULL, NULL)
			!= DMA_SUCCESS) {
			dmaengine_pause(pxa_dma->txdma_chan);
			dmaengine_tx_status(pxa_dma->txdma_chan,
					    pxa_dma->tx_cookie, &dma_state);
			sent = pxa_dma->tx_size - dma_state.residue;
			pxa_dma->tx_saved_len = dma_state.residue;
			memcpy(pxa_dma->tx_buf_save, pxa_dma->txdma_addr + sent,
			       dma_state.residue);
			stop_dma(sport, PXA_UART_TX);
		}
		if (dma_async_is_tx_complete(pxa_dma->rxdma_chan,
					     pxa_dma->rx_cookie, NULL, NULL)
			!= DMA_SUCCESS) {
			dmaengine_pause(pxa_dma->rxdma_chan);
			pxa_uart_receive_dma_cb(sport);
			stop_dma(sport, PXA_UART_RX);
		}
		local_irq_restore(flags);
	}

	uart_suspend_port(&serial_pxa_reg, &sport->port);

	/* Remove the UART RX constraint that would block entering D1p. */
	if (del_timer_sync(&sport->pxa_timer))
		pxa_timer_handler((unsigned long)sport);

	return 0;
}
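
For context, a minimal sketch of the matching resume path, which replays the TX bytes captured above; pxa_uart_transmit_dma_start() is an assumed helper that submits a TX DMA transfer of the given length from txdma_addr, and only fields used by the suspend code are relied on.

static int serial_pxa_resume(struct device *dev)
{
	struct uart_pxa_port *sport = dev_get_drvdata(dev);
	struct uart_pxa_dma *pxa_dma;

	if (!sport)
		return 0;

	pxa_dma = &sport->uart_dma;
	if ((sport->ier & UART_IER_DMAE) && pxa_dma->tx_saved_len > 0) {
		/* Replay the bytes saved in serial_pxa_suspend(). */
		memcpy(pxa_dma->txdma_addr, pxa_dma->tx_buf_save,
		       pxa_dma->tx_saved_len);
		pxa_uart_transmit_dma_start(sport, pxa_dma->tx_saved_len);
	}

	uart_resume_port(&serial_pxa_reg, &sport->port);
	return 0;
}
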
static void pxa_uart_transmit_dma_cb(void *data)
{
	struct uart_pxa_port *up = (struct uart_pxa_port *)data;
	struct uart_pxa_dma *pxa_dma = &up->uart_dma;
	struct circ_buf *xmit = &up->port.state->xmit;
	unsigned long flags;

	if (dma_async_is_tx_complete(pxa_dma->txdma_chan, pxa_dma->tx_cookie,
				     NULL, NULL) == DMA_SUCCESS)
		schedule_work(&up->uart_tx_lpm_work);

	spin_lock_irqsave(&up->port.lock, flags);
	/*
	 * The TX_DMA_RUNNING flag must be cleared only after all DMA
	 * interface operations have completed.
	 */
	pxa_dma->dma_status &= ~TX_DMA_RUNNING;
	spin_unlock_irqrestore(&up->port.lock, flags);

	/* If TX has been asked to stop, do not restart transmit DMA. */
	if (pxa_dma->tx_stop)
		return;

	if (up->port.x_char) {
		serial_out(up, UART_TX, up->port.x_char);
		up->port.icount.tx++;
		up->port.x_char = 0;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&up->port);

	if (!uart_circ_empty(xmit))
		tasklet_schedule(&pxa_dma->tklet);
}
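
The tasklet scheduled above restarts TX DMA for the data still pending in the circular buffer. A sketch under assumed names (uart_task_action, pxa_uart_transmit_dma_start):

static void uart_task_action(unsigned long data)
{
	struct uart_pxa_port *up = (struct uart_pxa_port *)data;
	struct circ_buf *xmit = &up->port.state->xmit;
	unsigned int count;

	/* Contiguous bytes available up to the end of the circular buffer. */
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (!count)
		return;

	/* Copy into the DMA bounce buffer and consume from the circ buf. */
	memcpy(up->uart_dma.txdma_addr, xmit->buf + xmit->tail, count);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	up->port.icount.tx += count;

	pxa_uart_transmit_dma_start(up, count);
}
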
static void serial_pxa_stop_tx(struct uart_port *port)
{
	struct uart_pxa_port *up = (struct uart_pxa_port *)port;
	unsigned int timeout = 0x100000;

	if (up->dma_enable) {
		up->uart_dma.tx_stop = 1;

		if (up->ier & UART_IER_DMAE) {
			/*
			 * We cannot use dma_status here to decide
			 * whether the DMA transfer has completed: this
			 * function may be called with a spinlock held
			 * (possibly with IRQs disabled), so if it runs
			 * on the same core as the DMA completion
			 * callback, the flag would never be updated
			 * because the IRQ handler is blocked. Poll the
			 * dmaengine directly instead.
			 */
			while (dma_async_is_tx_complete(up->uart_dma.txdma_chan,
			       up->uart_dma.tx_cookie, NULL, NULL)
				!= DMA_SUCCESS && timeout > 0) {
				timeout--;
				udelay(1);
			}

			BUG_ON(timeout == 0);
		}
	} else {
		if (up->ier & UART_IER_THRI) {
			up->ier &= ~UART_IER_THRI;
			serial_out(up, UART_IER, up->ier);
		}
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
Example #5
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
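
A usage sketch for dma_sync_wait(): submit a memcpy descriptor and block until it finishes. memcpy_and_wait() is hypothetical; dmaengine_prep_dma_memcpy(), dmaengine_submit() and dma_submit_error() are standard dmaengine calls, and DMA_COMPLETE is the current name for what older kernels called DMA_SUCCESS.

static int memcpy_and_wait(struct dma_chan *chan, dma_addr_t dst,
			   dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Channel must support DMA_MEMCPY; dst/src are DMA addresses. */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	/* dma_sync_wait() issues pending work and polls the cookie. */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		return -EIO;

	return 0;
}
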
Example #6
static void dma_callback(void *param)
{
	struct drvr_mem *mem_dev = (struct drvr_mem *)param;
	struct dma_chan *chan = mem_dev->dma.chan;
	enum dma_status status;

	/*
	 * The cookie is assumed to have been saved in mem_dev->dma when
	 * the descriptor was submitted (it was undefined in the original).
	 */
	status = dma_async_is_tx_complete(chan, mem_dev->dma.cookie,
					  NULL, NULL);

	switch (status) {
	case DMA_COMPLETE:
		irqraised1 = 1;
		break;
	case DMA_ERROR:
	default:
		irqraised1 = -1;
		break;
	}

	complete(&dma_comp);
}
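
A sketch of how dma_callback() might be wired up: attach it to a memcpy descriptor, save the cookie that the callback queries, then wait on dma_comp. drvr_start_dma() is hypothetical and assumes mem_dev->dma has a cookie field; irqraised1 and dma_comp are the module-scope variables used by the callback.

static int drvr_start_dma(struct drvr_mem *mem_dev, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_chan *chan = mem_dev->dma.chan;
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	/* Run dma_callback(mem_dev) when the transfer completes. */
	tx->callback = dma_callback;
	tx->callback_param = mem_dev;

	mem_dev->dma.cookie = dmaengine_submit(tx);
	if (dma_submit_error(mem_dev->dma.cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	wait_for_completion(&dma_comp);

	return irqraised1 == 1 ? 0 : -EIO;
}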