Example no. 1
0
static void pmz_transmit_chars(struct uart_pmac_port *uap)
{
	struct circ_buf *xmit;

	if (ZS_IS_ASLEEP(uap))
		return;
	if (ZS_IS_CONS(uap)) {
		unsigned char status = read_zsreg(uap, R0);

		/* TX still busy?  Just wait for the next TX done interrupt.
		 *
		 * It can occur because of how we do serial console writes.  It would
		 * be nice to transmit console writes just like we normally would for
		 * a TTY line. (ie. buffered and TX interrupt driven).  That is not
		 * easy because console writes cannot sleep.  One solution might be
		 * to poll on enough port->xmit space becoming free.  -DaveM
		 */
		if (!(status & Tx_BUF_EMP))
			return;
	}

	uap->flags &= ~PMACZILOG_FLAG_TX_ACTIVE;

	if (ZS_REGS_HELD(uap)) {
		pmz_load_zsregs(uap, uap->curregs);
		uap->flags &= ~PMACZILOG_FLAG_REGS_HELD;
	}

	if (ZS_TX_STOPPED(uap)) {
		uap->flags &= ~PMACZILOG_FLAG_TX_STOPPED;
		goto ack_tx_int;
	}

	if (uap->port.x_char) {
		uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
		write_zsdata(uap, uap->port.x_char);
		zssync(uap);
		uap->port.icount.tx++;
		uap->port.x_char = 0;
		return;
	}

	if (uap->port.info == NULL)
		goto ack_tx_int;
	xmit = &uap->port.info->xmit;
	if (uart_circ_empty(xmit)) {
		uart_write_wakeup(&uap->port);
		goto ack_tx_int;
	}
	if (uart_tx_stopped(&uap->port))
		goto ack_tx_int;

	uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
	write_zsdata(uap, xmit->buf[xmit->tail]);
	zssync(uap);

	xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx++;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return;

ack_tx_int:
	write_zsreg(uap, R0, RES_Tx_P);
	zssync(uap);
}
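
All of the transmit paths in these examples share one circular-buffer convention: UART_XMIT_SIZE is a power of two, the tail index is advanced with a bitwise AND rather than a modulo, and the writer is woken once fewer than WAKEUP_CHARS bytes remain queued. Below is a minimal, self-contained sketch of that pattern; the buffer size, the wakeup threshold, and the hw_write callback are placeholders, not the kernel's definitions.

#define XMIT_SIZE    4096u         /* must be a power of two */
#define WAKEUP_CHARS 256u          /* wake the writer below this many pending bytes */

struct circ {
	unsigned char buf[XMIT_SIZE];
	unsigned int head;             /* producer index */
	unsigned int tail;             /* consumer index */
};

static unsigned int circ_pending(const struct circ *c)
{
	return (c->head - c->tail) & (XMIT_SIZE - 1);
}

/* Push one queued byte to the hardware via the caller-supplied register
 * writer; returns 1 if a byte was sent, 0 if the buffer was empty. */
static int tx_one(struct circ *c, void (*hw_write)(unsigned char))
{
	if (c->head == c->tail)
		return 0;
	hw_write(c->buf[c->tail]);
	c->tail = (c->tail + 1) & (XMIT_SIZE - 1);   /* wrap with a mask */
	return 1;
}

/* After sending, the driver wakes the writer once circ_pending() drops
 * below WAKEUP_CHARS, as pmz_transmit_chars() does above. */
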
Example no. 2
0
static void ip22zilog_transmit_chars(struct uart_ip22zilog_port *up,
				    struct zilog_channel *channel)
{
	struct circ_buf *xmit;

	if (ZS_IS_CONS(up)) {
		unsigned char status = readb(&channel->control);
		ZSDELAY();

		/* TX still busy?  Just wait for the next TX done interrupt.
		 *
		 * It can occur because of how we do serial console writes.  It would
		 * be nice to transmit console writes just like we normally would for
		 * a TTY line. (ie. buffered and TX interrupt driven).  That is not
		 * easy because console writes cannot sleep.  One solution might be
		 * to poll on enough port->xmit space becoming free.  -DaveM
		 */
		if (!(status & Tx_BUF_EMP))
			return;
	}

	up->flags &= ~IP22ZILOG_FLAG_TX_ACTIVE;

	if (ZS_REGS_HELD(up)) {
		__load_zsregs(channel, up->curregs);
		up->flags &= ~IP22ZILOG_FLAG_REGS_HELD;
	}

	if (ZS_TX_STOPPED(up)) {
		up->flags &= ~IP22ZILOG_FLAG_TX_STOPPED;
		goto ack_tx_int;
	}

	if (up->port.x_char) {
		up->flags |= IP22ZILOG_FLAG_TX_ACTIVE;
		writeb(up->port.x_char, &channel->data);
		ZSDELAY();
		ZS_WSYNC(channel);

		up->port.icount.tx++;
		up->port.x_char = 0;
		return;
	}

	if (up->port.state == NULL)
		goto ack_tx_int;
	xmit = &up->port.state->xmit;
	if (uart_circ_empty(xmit))
		goto ack_tx_int;
	if (uart_tx_stopped(&up->port))
		goto ack_tx_int;

	up->flags |= IP22ZILOG_FLAG_TX_ACTIVE;
	writeb(xmit->buf[xmit->tail], &channel->data);
	ZSDELAY();
	ZS_WSYNC(channel);

	xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	up->port.icount.tx++;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&up->port);

	return;

ack_tx_int:
	writeb(RES_Tx_P, &channel->control);
	ZSDELAY();
	ZS_WSYNC(channel);
}
Example no. 3
0
/**
 * cdns_uart_isr - Interrupt handler
 * @irq: Irq number
 * @dev_id: Id of the port
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
{
    struct uart_port *port = (struct uart_port *)dev_id;
    unsigned long flags;
    unsigned int isrstatus, numbytes;
    unsigned int data;
    char status = TTY_NORMAL;

    spin_lock_irqsave(&port->lock, flags);

    /* Read the interrupt status register to determine which
     * interrupt(s) is/are active.
     */
    isrstatus = cdns_uart_readl(CDNS_UART_ISR_OFFSET);

    /*
     * There is no hardware break detection, so we interpret framing
     * error with all-zeros data as a break sequence. Most of the time,
     * there's another non-zero byte at the end of the sequence.
     */
    if (isrstatus & CDNS_UART_IXR_FRAMING) {
        while (!(cdns_uart_readl(CDNS_UART_SR_OFFSET) &
                 CDNS_UART_SR_RXEMPTY)) {
            if (!cdns_uart_readl(CDNS_UART_FIFO_OFFSET)) {
                port->read_status_mask |= CDNS_UART_IXR_BRK;
                isrstatus &= ~CDNS_UART_IXR_FRAMING;
            }
        }
        cdns_uart_writel(CDNS_UART_IXR_FRAMING, CDNS_UART_ISR_OFFSET);
    }

    /* drop byte with parity error if IGNPAR specified */
    if (isrstatus & port->ignore_status_mask & CDNS_UART_IXR_PARITY)
        isrstatus &= ~(CDNS_UART_IXR_RXTRIG | CDNS_UART_IXR_TOUT);

    isrstatus &= port->read_status_mask;
    isrstatus &= ~port->ignore_status_mask;

    if ((isrstatus & CDNS_UART_IXR_TOUT) ||
            (isrstatus & CDNS_UART_IXR_RXTRIG)) {
        /* Receive Timeout Interrupt */
        while ((cdns_uart_readl(CDNS_UART_SR_OFFSET) &
                CDNS_UART_SR_RXEMPTY) != CDNS_UART_SR_RXEMPTY) {
            data = cdns_uart_readl(CDNS_UART_FIFO_OFFSET);

            /* Non-NULL byte after BREAK is garbage (99%) */
            if (data && (port->read_status_mask &
                         CDNS_UART_IXR_BRK)) {
                port->read_status_mask &= ~CDNS_UART_IXR_BRK;
                port->icount.brk++;
                if (uart_handle_break(port))
                    continue;
            }

#ifdef SUPPORT_SYSRQ
            /*
             * uart_handle_sysrq_char() doesn't work if
             * spinlocked, for some reason
             */
            if (port->sysrq) {
                spin_unlock(&port->lock);
                if (uart_handle_sysrq_char(port,
                                           (unsigned char)data)) {
                    spin_lock(&port->lock);
                    continue;
                }
                spin_lock(&port->lock);
            }
#endif

            port->icount.rx++;

            if (isrstatus & CDNS_UART_IXR_PARITY) {
                port->icount.parity++;
                status = TTY_PARITY;
            } else if (isrstatus & CDNS_UART_IXR_FRAMING) {
                port->icount.frame++;
                status = TTY_FRAME;
            } else if (isrstatus & CDNS_UART_IXR_OVERRUN) {
                port->icount.overrun++;
            }

            uart_insert_char(port, isrstatus, CDNS_UART_IXR_OVERRUN,
                             data, status);
        }
        spin_unlock(&port->lock);
        tty_flip_buffer_push(&port->state->port);
        spin_lock(&port->lock);
    }

    /* Dispatch an appropriate handler */
    if ((isrstatus & CDNS_UART_IXR_TXEMPTY) == CDNS_UART_IXR_TXEMPTY) {
        if (uart_circ_empty(&port->state->xmit)) {
            cdns_uart_writel(CDNS_UART_IXR_TXEMPTY,
                             CDNS_UART_IDR_OFFSET);
        } else {
            numbytes = port->fifosize;
            /* Break if no more data available in the UART buffer */
            while (numbytes--) {
                if (uart_circ_empty(&port->state->xmit))
                    break;
                /* Get the data from the UART circular buffer
                 * and write it to the cdns_uart's TX_FIFO
                 * register.
                 */
                cdns_uart_writel(
                    port->state->xmit.buf[port->state->xmit.
                                          tail], CDNS_UART_FIFO_OFFSET);

                port->icount.tx++;

                /* Adjust the tail of the UART buffer and wrap
                 * the buffer if it reaches limit.
                 */
                port->state->xmit.tail =
                    (port->state->xmit.tail + 1) &
                    (UART_XMIT_SIZE - 1);
            }

            if (uart_circ_chars_pending(
                        &port->state->xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
        }
    }

    cdns_uart_writel(isrstatus, CDNS_UART_ISR_OFFSET);

    /* be sure to release the lock and tty before leaving */
    spin_unlock_irqrestore(&port->lock, flags);

    return IRQ_HANDLED;
}
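
The cdns_uart handler above has no hardware break detection, so it treats a framing error whose received data is all zeros as a break sequence. A condensed, self-contained sketch of that heuristic follows; the rx/n parameters stand in for draining the RX FIFO during the framing-error burst and are an illustration, not the driver's API.

#include <stdbool.h>
#include <stddef.h>

/* Return true if a framing-error burst looks like a break: any all-zero
 * data byte in the burst marks it as a break condition. */
static bool framing_error_is_break(const unsigned char *rx, size_t n)
{
	bool saw_zero = false;
	size_t i;

	for (i = 0; i < n; i++)
		if (rx[i] == 0)
			saw_zero = true;
	return saw_zero;
}
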
Example no. 4
0
/*
 * Transmit characters, refill buffer descriptor, if possible
 */
static int cpm_uart_tx_pump(struct uart_port *port)
{
	volatile cbd_t *bdp;
	unsigned char *p;
	int count;
	struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
	struct circ_buf *xmit = &port->info->xmit;

	/* Handle xon/xoff */
	if (port->x_char) {
		/* Pick next descriptor and fill from buffer */
		bdp = pinfo->tx_cur;

		p = cpm2cpu_addr(bdp->cbd_bufaddr);

		*p++ = port->x_char;
		bdp->cbd_datlen = 1;
		bdp->cbd_sc |= BD_SC_READY;
		/* Get next BD. */
		if (bdp->cbd_sc & BD_SC_WRAP)
			bdp = pinfo->tx_bd_base;
		else
			bdp++;
		pinfo->tx_cur = bdp;

		port->icount.tx++;
		port->x_char = 0;
		return 1;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		cpm_uart_stop_tx(port);
		return 0;
	}

	/* Pick next descriptor and fill from buffer */
	bdp = pinfo->tx_cur;

	while (!(bdp->cbd_sc & BD_SC_READY) && (xmit->tail != xmit->head)) {
		count = 0;
		p = cpm2cpu_addr(bdp->cbd_bufaddr);
		while (count < pinfo->tx_fifosize) {
			*p++ = xmit->buf[xmit->tail];
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
			port->icount.tx++;
			count++;
			if (xmit->head == xmit->tail)
				break;
		}
		bdp->cbd_datlen = count;
		bdp->cbd_sc |= BD_SC_READY;
		__asm__("eieio");
		/* Get next BD. */
		if (bdp->cbd_sc & BD_SC_WRAP)
			bdp = pinfo->tx_bd_base;
		else
			bdp++;
	}
	pinfo->tx_cur = bdp;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit)) {
		cpm_uart_stop_tx(port);
		return 0;
	}

	return 1;
}
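
cpm_uart_tx_pump() walks a ring of buffer descriptors: it fills a descriptor, marks it READY so ownership passes to the CPM, and steps to the next descriptor, wrapping back to the base when the WRAP bit is set. The sketch below shows just that ring-advance logic; the struct layout and flag values are illustrative and do not match cbd_t.

#define BD_READY 0x8000u   /* descriptor owned by the controller (illustrative) */
#define BD_WRAP  0x2000u   /* last descriptor in the ring (illustrative) */

struct bd {
	unsigned short status;   /* READY/WRAP flags */
	unsigned short datlen;   /* number of valid bytes in the buffer */
};

/* Step to the next descriptor, wrapping to the ring base on WRAP. */
static struct bd *bd_ring_next(struct bd *cur, struct bd *base)
{
	return (cur->status & BD_WRAP) ? base : cur + 1;
}

/* Hand one filled descriptor to the controller and return the next slot. */
static struct bd *bd_submit(struct bd *cur, struct bd *base, unsigned short len)
{
	cur->datlen = len;
	cur->status |= BD_READY;
	return bd_ring_next(cur, base);
}
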
Example no. 5
0
static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch)
{
	u16 head;
	u16 tail;
	int n;
	int s;
	int qlen;
	u32 len_written = 0;

	if (!ch)
		return;

	/* No data to write to the UART */
	if (ch->ch_w_tail == ch->ch_w_head)
		return;

	/* If port is "stopped", don't send any data to the UART */
	if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_BREAK_SENDING))
		return;
	/*
	 * If FIFOs are disabled. Send data directly to txrx register
	 */
	if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
		u8 lsrbits = readb(&ch->ch_neo_uart->lsr);

		ch->ch_cached_lsr |= lsrbits;
		if (ch->ch_cached_lsr & UART_LSR_THRE) {
			ch->ch_cached_lsr &= ~(UART_LSR_THRE);

			writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_neo_uart->txrx);
			jsm_printk(WRITE, INFO, &ch->ch_bd->pci_dev,
					"Tx data: %x\n", ch->ch_wqueue[ch->ch_w_tail]);
			ch->ch_w_tail++;
			ch->ch_w_tail &= WQUEUEMASK;
			ch->ch_txcount++;
		}
		return;
	}

	/*
	 * We have to do it this way, because of the EXAR TXFIFO count bug.
	 */
	if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM)))
		return;

	n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel;

	/* cache head and tail of queue */
	head = ch->ch_w_head & WQUEUEMASK;
	tail = ch->ch_w_tail & WQUEUEMASK;
	qlen = (head - tail) & WQUEUEMASK;

	/* Find minimum of the FIFO space, versus queue length */
	n = min(n, qlen);

	while (n > 0) {

		s = ((head >= tail) ? head : WQUEUESIZE) - tail;
		s = min(s, n);

		if (s <= 0)
			break;

		memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s);
		/* Add and flip queue if needed */
		tail = (tail + s) & WQUEUEMASK;
		n -= s;
		ch->ch_txcount += s;
		len_written += s;
	}

	/* Update the final tail */
	ch->ch_w_tail = tail & WQUEUEMASK;

	if (len_written >= ch->ch_t_tlevel)
		ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);

	if (!jsm_tty_write(&ch->uart_port))
		uart_write_wakeup(&ch->uart_port);
}
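
The burst loop above copies from the circular write queue to the TX FIFO in contiguous chunks: when the pending data wraps around the end of the queue, the first copy runs from the tail to the end of the buffer and the next iteration restarts at index 0. Here is a small sketch of that two-chunk drain, assuming a power-of-two queue size and a caller-supplied fifo_write() burst routine (both are placeholders, not the jsm driver's names).

#define QSIZE 1024u               /* power of two (illustrative) */
#define QMASK (QSIZE - 1)

/* Copy up to 'budget' queued bytes to the FIFO in at most two contiguous
 * runs; returns the updated tail index. */
static unsigned int drain_queue(const unsigned char *q,
				unsigned int head, unsigned int tail,
				unsigned int budget,
				void (*fifo_write)(const unsigned char *, unsigned int))
{
	unsigned int pending = (head - tail) & QMASK;
	unsigned int n = (pending < budget) ? pending : budget;

	while (n > 0) {
		/* Largest contiguous run starting at tail. */
		unsigned int s = ((head >= tail) ? head : QSIZE) - tail;

		if (s > n)
			s = n;
		fifo_write(q + tail, s);
		tail = (tail + s) & QMASK;
		n -= s;
	}
	return tail;
}
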
Example no. 6
0
static void sci_transmit_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->info->xmit;
	unsigned int stopped = uart_tx_stopped(port);
	unsigned short status;
	unsigned short ctrl;
	int count;

	status = sci_in(port, SCxSR);
	if (!(status & SCxSR_TDxE(port))) {
		ctrl = sci_in(port, SCSCR);
		if (uart_circ_empty(xmit)) {
			ctrl &= ~SCI_CTRL_FLAGS_TIE;
		} else {
			ctrl |= SCI_CTRL_FLAGS_TIE;
		}
		sci_out(port, SCSCR, ctrl);
		return;
	}

	if (port->type == PORT_SCIF)
		count = scif_txroom(port);
	else
		count = sci_txroom(port);

	do {
		unsigned char c;

		if (port->x_char) {
			c = port->x_char;
			port->x_char = 0;
		} else if (!uart_circ_empty(xmit) && !stopped) {
			c = xmit->buf[xmit->tail];
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		} else {
			break;
		}

		sci_out(port, SCxTDR, c);

		port->icount.tx++;
	} while (--count > 0);

	sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (uart_circ_empty(xmit)) {
		sci_stop_tx(port);
	} else {
		ctrl = sci_in(port, SCSCR);

		if (port->type == PORT_SCIF) {
			sci_in(port, SCxSR); /* Dummy read */
			sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port));
		}

		ctrl |= SCI_CTRL_FLAGS_TIE;
		sci_out(port, SCSCR, ctrl);
	}
}
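
sci_transmit_chars() also shows the usual interrupt gating on the transmit side: the TX-empty (TIE) interrupt is left enabled only while the software buffer still holds data, and is cleared as soon as the buffer drains so the handler is not re-entered for nothing. A schematic of that gating, operating on an in-memory stand-in for the control register rather than real SCSCR accessors:

#include <stdbool.h>

#define CTRL_TX_IRQ_EN 0x80u   /* illustrative TX-interrupt-enable bit */

/* Enable the TX interrupt while data is pending, disable it otherwise. */
static void update_tx_irq(unsigned int *ctrl, bool have_pending_data)
{
	if (have_pending_data)
		*ctrl |= CTRL_TX_IRQ_EN;
	else
		*ctrl &= ~CTRL_TX_IRQ_EN;
}
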
Example no. 7
0
static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch)
{
	u16 head;
	u16 tail;
	int n;
	int s;
	int qlen;
	u32 len_written = 0;
	struct circ_buf *circ;

	if (!ch)
		return;

	circ = &ch->uart_port.state->xmit;

	/* No data to write to the UART */
	if (uart_circ_empty(circ))
		return;

	/* If port is "stopped", don't send any data to the UART */
	if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_BREAK_SENDING))
		return;
	/*
	 * If FIFOs are disabled. Send data directly to txrx register
	 */
	if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
		u8 lsrbits = readb(&ch->ch_neo_uart->lsr);

		ch->ch_cached_lsr |= lsrbits;
		if (ch->ch_cached_lsr & UART_LSR_THRE) {
			ch->ch_cached_lsr &= ~(UART_LSR_THRE);

			writeb(circ->buf[circ->tail], &ch->ch_neo_uart->txrx);
			jsm_dbg(WRITE, &ch->ch_bd->pci_dev,
				"Tx data: %x\n", circ->buf[circ->tail]);
			circ->tail = (circ->tail + 1) & (UART_XMIT_SIZE - 1);
			ch->ch_txcount++;
		}
		return;
	}

	/*
	 * We have to do it this way, because of the EXAR TXFIFO count bug.
	 */
	if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM)))
		return;

	n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel;

	/* cache head and tail of queue */
	head = circ->head & (UART_XMIT_SIZE - 1);
	tail = circ->tail & (UART_XMIT_SIZE - 1);
	qlen = uart_circ_chars_pending(circ);

	/* Find minimum of the FIFO space, versus queue length */
	n = min(n, qlen);

	while (n > 0) {

		s = ((head >= tail) ? head : UART_XMIT_SIZE) - tail;
		s = min(s, n);

		if (s <= 0)
			break;

		memcpy_toio(&ch->ch_neo_uart->txrxburst, circ->buf + tail, s);
		/* Add and flip queue if needed */
		tail = (tail + s) & (UART_XMIT_SIZE - 1);
		n -= s;
		ch->ch_txcount += s;
		len_written += s;
	}

	/* Update the final tail */
	circ->tail = tail & (UART_XMIT_SIZE - 1);

	if (len_written >= ch->ch_t_tlevel)
		ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);

	if (uart_circ_empty(circ))
		uart_write_wakeup(&ch->uart_port);
}
Example no. 8
0
static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
{
	struct tty_struct *tty = NULL;
	unsigned int status, ch, flg;
	static struct timeval anomaly_start = { .tv_sec = 0 };

	status = UART_GET_LSR(uart);
	UART_CLEAR_LSR(uart);

 	ch = UART_GET_CHAR(uart);
 	uart->port.icount.rx++;

#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	if (kgdb_connected && kgdboc_port_line == uart->port.line)
		if (ch == 0x3) {/* Ctrl + C */
			kgdb_breakpoint();
			return;
		}

	if (!uart->port.info || !uart->port.info->tty)
		return;
#endif
	tty = uart->port.info->tty;

	if (ANOMALY_05000363) {
		/* The BF533 (and BF561) family of processors have a nice anomaly
		 * where they continuously generate characters for a "single" break.
		 * We have to basically ignore this flood until the "next" valid
		 * character comes across.  Due to the nature of the flood, it is
		 * not possible to reliably catch bytes that are sent too quickly
		 * after this break.  So application code talking to the Blackfin
		 * which sends a break signal must allow at least 1.5 character
		 * times after the end of the break for things to stabilize.  This
		 * timeout was picked as it must absolutely be larger than 1
		 * character time +/- some percent.  So 1.5 sounds good.  All other
		 * Blackfin families operate properly.  Woo.
		 */
		if (anomaly_start.tv_sec) {
			struct timeval curr;
			suseconds_t usecs;

			if ((~ch & (~ch + 1)) & 0xff)
				goto known_good_char;

			do_gettimeofday(&curr);
			if (curr.tv_sec - anomaly_start.tv_sec > 1)
				goto known_good_char;

			usecs = 0;
			if (curr.tv_sec != anomaly_start.tv_sec)
				usecs += USEC_PER_SEC;
			usecs += curr.tv_usec - anomaly_start.tv_usec;

			if (usecs > UART_GET_ANOMALY_THRESHOLD(uart))
				goto known_good_char;

			if (ch)
				anomaly_start.tv_sec = 0;
			else
				anomaly_start = curr;

			return;

 known_good_char:
			status &= ~BI;
			anomaly_start.tv_sec = 0;
		}
	}

	if (status & BI) {
		if (ANOMALY_05000363)
			if (bfin_revid() < 5)
				do_gettimeofday(&anomaly_start);
		uart->port.icount.brk++;
		if (uart_handle_break(&uart->port))
			goto ignore_char;
		status &= ~(PE | FE);
	}
	if (status & PE)
		uart->port.icount.parity++;
	if (status & OE)
		uart->port.icount.overrun++;
	if (status & FE)
		uart->port.icount.frame++;

	status &= uart->port.read_status_mask;

	if (status & BI)
		flg = TTY_BREAK;
	else if (status & PE)
		flg = TTY_PARITY;
	else if (status & FE)
		flg = TTY_FRAME;
	else
		flg = TTY_NORMAL;

	if (uart_handle_sysrq_char(&uart->port, ch))
		goto ignore_char;

	uart_insert_char(&uart->port, status, OE, ch, flg);

 ignore_char:
	tty_flip_buffer_push(tty);
}

static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
{
	struct circ_buf *xmit = &uart->port.info->xmit;

	/*
	 * Check the modem control lines before
	 * transmitting anything.
	 */
	bfin_serial_mctrl_check(uart);

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
#ifdef CONFIG_BF54x
		/* Clear TFI bit */
		UART_PUT_LSR(uart, TFI);
#endif
		UART_CLEAR_IER(uart, ETBEI);
		return;
	}

	if (uart->port.x_char) {
		UART_PUT_CHAR(uart, uart->port.x_char);
		uart->port.icount.tx++;
		uart->port.x_char = 0;
	}

	while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) {
		UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		uart->port.icount.tx++;
		SSYNC();
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uart->port);
}

static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;

	spin_lock(&uart->port.lock);
	while (UART_GET_LSR(uart) & DR)
		bfin_serial_rx_chars(uart);
	spin_unlock(&uart->port.lock);

	return IRQ_HANDLED;
}

static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;

	spin_lock(&uart->port.lock);
	if (UART_GET_LSR(uart) & THRE)
		bfin_serial_tx_chars(uart);
	spin_unlock(&uart->port.lock);

	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_SERIAL_BFIN_DMA
static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
{
	struct circ_buf *xmit = &uart->port.info->xmit;

	uart->tx_done = 0;

	/*
	 * Check the modem control lines before
	 * transmitting anything.
	 */
	bfin_serial_mctrl_check(uart);

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
		uart->tx_count = 0;
		uart->tx_done = 1;
		return;
	}

	if (uart->port.x_char) {
		UART_PUT_CHAR(uart, uart->port.x_char);
		uart->port.icount.tx++;
		uart->port.x_char = 0;
	}

	uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail))
		uart->tx_count = UART_XMIT_SIZE - xmit->tail;
	blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail),
					(unsigned long)(xmit->buf+xmit->tail+uart->tx_count));
	set_dma_config(uart->tx_dma_channel,
		set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
			INTR_ON_BUF,
			DIMENSION_LINEAR,
			DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail));
	set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
	set_dma_x_modify(uart->tx_dma_channel, 1);
	enable_dma(uart->tx_dma_channel);

	UART_SET_IER(uart, ETBEI);
}

static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
{
	struct tty_struct *tty = uart->port.info->port.tty;
	int i, flg, status;

	status = UART_GET_LSR(uart);
	UART_CLEAR_LSR(uart);

	uart->port.icount.rx +=
		CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail,
		UART_XMIT_SIZE);

	if (status & BI) {
		uart->port.icount.brk++;
		if (uart_handle_break(&uart->port))
			goto dma_ignore_char;
		status &= ~(PE | FE);
	}
	if (status & PE)
		uart->port.icount.parity++;
	if (status & OE)
		uart->port.icount.overrun++;
	if (status & FE)
		uart->port.icount.frame++;

	status &= uart->port.read_status_mask;

	if (status & BI)
		flg = TTY_BREAK;
	else if (status & PE)
		flg = TTY_PARITY;
	else if (status & FE)
		flg = TTY_FRAME;
	else
		flg = TTY_NORMAL;

	for (i = uart->rx_dma_buf.tail; i != uart->rx_dma_buf.head; i++) {
		if (i >= UART_XMIT_SIZE)
			i = 0;
		if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i]))
			uart_insert_char(&uart->port, status, OE,
				uart->rx_dma_buf.buf[i], flg);
	}

 dma_ignore_char:
	tty_flip_buffer_push(tty);
}

void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
{
	int x_pos, pos;
	unsigned long flags;

	spin_lock_irqsave(&uart->port.lock, flags);

	uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
	x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
	uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
	if (uart->rx_dma_nrows == DMA_RX_YCOUNT)
		uart->rx_dma_nrows = 0;
	x_pos = DMA_RX_XCOUNT - x_pos;
	if (x_pos == DMA_RX_XCOUNT)
		x_pos = 0;

	pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
	if (pos != uart->rx_dma_buf.tail) {
		uart->rx_dma_buf.head = pos;
		bfin_serial_dma_rx_chars(uart);
		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
	}

	spin_unlock_irqrestore(&uart->port.lock, flags);

	mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
}
Example no. 9
0
/**
 * xuartps_isr - Interrupt handler
 * @irq: Irq number
 * @dev_id: Id of the port
 *
 * Returns IRQ_HANDLED
 **/
static irqreturn_t xuartps_isr(int irq, void *dev_id)
{
	struct uart_port *port = (struct uart_port *)dev_id;
	unsigned long flags;
	unsigned int isrstatus, numbytes;
	unsigned int data;
	char status = TTY_NORMAL;

	spin_lock_irqsave(&port->lock, flags);

	/* Read the interrupt status register to determine which
	 * interrupt(s) is/are active.
	 */
	isrstatus = xuartps_readl(XUARTPS_ISR_OFFSET);

	/* drop byte with parity error if IGNPAR specified */
	if (isrstatus & port->ignore_status_mask & XUARTPS_IXR_PARITY)
		isrstatus &= ~(XUARTPS_IXR_RXTRIG | XUARTPS_IXR_TOUT);

	isrstatus &= port->read_status_mask;
	isrstatus &= ~port->ignore_status_mask;

	if ((isrstatus & XUARTPS_IXR_TOUT) ||
		(isrstatus & XUARTPS_IXR_RXTRIG)) {
		/* Receive Timeout Interrupt */
		while ((xuartps_readl(XUARTPS_SR_OFFSET) &
			XUARTPS_SR_RXEMPTY) != XUARTPS_SR_RXEMPTY) {
			data = xuartps_readl(XUARTPS_FIFO_OFFSET);
			port->icount.rx++;

			if (isrstatus & XUARTPS_IXR_PARITY) {
				port->icount.parity++;
				status = TTY_PARITY;
			} else if (isrstatus & XUARTPS_IXR_FRAMING) {
				port->icount.frame++;
				status = TTY_FRAME;
			} else if (isrstatus & XUARTPS_IXR_OVERRUN)
				port->icount.overrun++;

			uart_insert_char(port, isrstatus, XUARTPS_IXR_OVERRUN,
					data, status);
		}
		spin_unlock(&port->lock);
		tty_flip_buffer_push(&port->state->port);
		spin_lock(&port->lock);
	}

	/* Dispatch an appropriate handler */
	if ((isrstatus & XUARTPS_IXR_TXEMPTY) == XUARTPS_IXR_TXEMPTY) {
		if (uart_circ_empty(&port->state->xmit)) {
			xuartps_writel(XUARTPS_IXR_TXEMPTY,
						XUARTPS_IDR_OFFSET);
		} else {
			numbytes = port->fifosize;
			/* Break if no more data available in the UART buffer */
			while (numbytes--) {
				if (uart_circ_empty(&port->state->xmit))
					break;
				/* Get the data from the UART circular buffer
				 * and write it to the xuartps's TX_FIFO
				 * register.
				 */
				xuartps_writel(
					port->state->xmit.buf[port->state->xmit.
					tail], XUARTPS_FIFO_OFFSET);

				port->icount.tx++;

				/* Adjust the tail of the UART buffer and wrap
				 * the buffer if it reaches limit.
				 */
				port->state->xmit.tail =
					(port->state->xmit.tail + 1) & \
						(UART_XMIT_SIZE - 1);
			}

			if (uart_circ_chars_pending(
					&port->state->xmit) < WAKEUP_CHARS)
				uart_write_wakeup(port);
		}
	}

	xuartps_writel(isrstatus, XUARTPS_ISR_OFFSET);

	/* be sure to release the lock and tty before leaving */
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}
Example no. 10
static void ambauart_int(int irq, void *dev_id, struct pt_regs *regs)
{
    struct uart_port *port = dev_id;
    struct uart_info *info = port->info;
    unsigned int status, ier, old_ier, count, lsr;

    old_ier  = UART_GET_IER(port);
    UART_PUT_IER(port, (old_ier & 0xFFFFF0FF));

    status = UART_GET_INT_STATUS(port);
    
    lsr = UART_GET_LSR(port);
    
    /* KS8695_INTMASK_UART_RX is not set during a break condition as it should
     * be (looks like a HW bug), so we specifically check for a break condition
     * in the UART line status register.
     * Some bits from the UART line status register are cleared only when they
     * are read by CPU. That is why we cannot read the line status register
     * twice, and should pass the first read as argument to ambauart_rx_chars.
     * Refer to CENTAUR KS8695PX's Register Description document:
     * KS8695PX_REG_DESCP_v1.0.pdf, page 58: "UART Line Status Register".
     */
    if (status & KS8695_INTMASK_UART_RX || lsr & KS8695_UART_LINES_BE)
    {
#ifdef SUPPORT_SYSRQ
	ambauart_rx_chars(port, regs, lsr);
#else
	ambauart_rx_chars(port, lsr);
#endif
    }
    if (status & KS8695_INTMASK_UART_TX) 
    {
        if (port->x_char)
        {
                UART_CLR_INT_STATUS(port, KS8695_INTMASK_UART_TX);
                UART_PUT_CHAR(port, (u_int) port->x_char);
                port->icount.tx++;
                port->x_char = 0;
                ier = UART_GET_IER(port);
                ier &= 0xFFFFFEFF;
                UART_PUT_IER(port, ier);
                printk("XOn/Off sent\n");
                return;
        }
        for ( count = 0; count < 16; count++)
        {
              if (info->xmit.head == info->xmit.tail)
              {
                 /*ier = UART_GET_IER(port);
                 ier &= 0xFFFFFEFF;
                 UART_PUT_IER(port, ier);*/
                 break;
              }
              UART_CLR_INT_STATUS(port, KS8695_INTMASK_UART_TX);
              UART_PUT_CHAR(port, (u_int) (info->xmit.buf[info->xmit.tail]));
              info->xmit.tail = (info->xmit.tail + 1) & (UART_XMIT_SIZE - 1);
              port->icount.tx++;
        };
        if (CIRC_CNT(info->xmit.head, info->xmit.tail, UART_XMIT_SIZE) < WAKEUP_CHARS)
                uart_write_wakeup(port);
 
        if (info->xmit.head == info->xmit.tail)
        {
           ier = UART_GET_IER(port);
           ier &= 0xFFFFFEFF;
           UART_PUT_IER(port, ier);
        }
    }
    if (status & KS8695_INTMASK_UART_MODEMS)
    {
	ambauart_modem_status(port);
    }
    if ( status & KS8695_INTMASK_UART_LINE_ERR)
    {
         UART_GET_LSR(port);
    }
    if (info->xmit.head == info->xmit.tail)
       UART_PUT_IER(port, (old_ier & 0xFFFFFEFF));
    else
       UART_PUT_IER(port, old_ier | KS8695_INTMASK_UART_TX);
}
Example no. 11
0
static void tegra_uart_tasklet_action(unsigned long data)
{
	struct tegra_uart_port *t = (struct tegra_uart_port *)data;
	uart_write_wakeup(&t->uport);

}
Example no. 12
0
/*
 * ------------------------------------------------------------
 * receive_char ()
 *
 * This routine deals with inputs from any lines.
 * ------------------------------------------------------------
 */
static inline void dz_receive_chars(struct dz_port *dport)
{
	struct tty_struct *tty = NULL;
	struct uart_icount *icount;
	int ignore = 0;
	unsigned short status, tmp;
	unsigned char ch, flag;

	/* this code is going to be a problem...
	   the call to tty_flip_buffer is going to need
	   to be rethought...
	 */
	do {
		status = dz_in(dport, DZ_RBUF);

		/* punt so we don't get duplicate characters */
		if (!(status & DZ_DVAL))
			goto ignore_char;


		ch = UCHAR(status);	/* grab the char */
		flag = TTY_NORMAL;

#if 0
		if (info->is_console) {
			if (ch == 0)
				return;		/* it's a break ... */
		}
#endif

		tty = dport->port.info->tty;/* now tty points to the proper dev */
		icount = &dport->port.icount;

		if (!tty)
			break;
		if (tty->flip.count >= TTY_FLIPBUF_SIZE)
			break;

		icount->rx++;

		/* keep track of the statistics */
		if (status & (DZ_OERR | DZ_FERR | DZ_PERR)) {
			if (status & DZ_PERR)	/* parity error */
				icount->parity++;
			else if (status & DZ_FERR)	/* frame error */
				icount->frame++;
			if (status & DZ_OERR)	/* overrun error */
				icount->overrun++;

			/*  check to see if we should ignore the character
			   and mask off conditions that should be ignored
			 */

			if (status & dport->port.ignore_status_mask) {
				if (++ignore > 100)
					break;
				goto ignore_char;
			}
			/* mask off the error conditions we want to ignore */
			tmp = status & dport->port.read_status_mask;

			if (tmp & DZ_PERR) {
				flag = TTY_PARITY;
#ifdef DEBUG_DZ
				debug_console("PERR\n", 5);
#endif
			} else if (tmp & DZ_FERR) {
				flag = TTY_FRAME;
#ifdef DEBUG_DZ
				debug_console("FERR\n", 5);
#endif
			}
			if (tmp & DZ_OERR) {
#ifdef DEBUG_DZ
				debug_console("OERR\n", 5);
#endif
				tty_insert_flip_char(tty, ch, flag);
				ch = 0;
				flag = TTY_OVERRUN;
			}
		}
		tty_insert_flip_char(tty, ch, flag);
	      ignore_char:;
	} while (status & DZ_DVAL);

	if (tty)
		tty_flip_buffer_push(tty);
}

/*
 * ------------------------------------------------------------
 * transmit_char ()
 *
 * This routine deals with outputs to any lines.
 * ------------------------------------------------------------
 */
static inline void dz_transmit_chars(struct dz_port *dport)
{
	struct circ_buf *xmit = &dport->port.info->xmit;
	unsigned char tmp;

	if (dport->port.x_char) {	/* XON/XOFF chars */
		dz_out(dport, DZ_TDR, dport->port.x_char);
		dport->port.icount.tx++;
		dport->port.x_char = 0;
		return;
	}
	/* if nothing to do or stopped or hardware stopped */
	if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
		dz_stop_tx(&dport->port, 0);
		return;
	}

	/*
	 * if something to do ... (remember the dz has no output fifo so we go
	 * one char at a time :-<
	 */
	tmp = xmit->buf[xmit->tail];
	xmit->tail = (xmit->tail + 1) & (DZ_XMIT_SIZE - 1);
	dz_out(dport, DZ_TDR, tmp);
	dport->port.icount.tx++;

	if (uart_circ_chars_pending(xmit) < DZ_WAKEUP_CHARS)
		uart_write_wakeup(&dport->port);

	/* Are we done */
	if (uart_circ_empty(xmit))
		dz_stop_tx(&dport->port, 0);
}

/*
 * ------------------------------------------------------------
 * check_modem_status ()
 *
 * Only valid for the MODEM line duh !
 * ------------------------------------------------------------
 */
static inline void check_modem_status(struct dz_port *dport)
{
	unsigned short status;

	/* if not the modem line just return */
	if (dport->port.line != DZ_MODEM)
		return;

	status = dz_in(dport, DZ_MSR);

	/* it's easy, since DSR2 is the only bit in the register */
	if (status)
		dport->port.icount.dsr++;
}
Example no. 13
0
static void
transmit_chars_no_dma(struct uart_cris_port *up)
{
	int max_count;
	struct circ_buf *xmit = &up->port.state->xmit;

	void __iomem *regi_ser = up->regi_ser;
	reg_ser_r_stat_din rstat;
	reg_ser_rw_ack_intr ack_intr = { .tr_rdy = regk_ser_yes };

	if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
		/* No more to send, so disable the interrupt. */
		reg_ser_rw_intr_mask intr_mask;

		intr_mask = REG_RD(ser, regi_ser, rw_intr_mask);
		intr_mask.tr_rdy = 0;
		intr_mask.tr_empty = 0;
		REG_WR(ser, regi_ser, rw_intr_mask, intr_mask);
		up->write_ongoing = 0;
		return;
	}

	/* If the serport is fast, we send up to max_count bytes before
	   exiting the loop.  */
	max_count = 64;
	do {
		reg_ser_rw_dout dout = { .data = xmit->buf[xmit->tail] };

		REG_WR(ser, regi_ser, rw_dout, dout);
		REG_WR(ser, regi_ser, rw_ack_intr, ack_intr);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1);
		up->port.icount.tx++;
		if (xmit->head == xmit->tail)
			break;
		rstat = REG_RD(ser, regi_ser, r_stat_din);
	} while ((--max_count > 0) && rstat.tr_rdy);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&up->port);
}

static void receive_chars_no_dma(struct uart_cris_port *up)
{
	reg_ser_rs_stat_din stat_din;
	reg_ser_r_stat_din rstat;
	struct tty_port *port;
	struct uart_icount *icount;
	int max_count = 16;
	char flag;
	reg_ser_rw_ack_intr ack_intr = { 0 };

	rstat = REG_RD(ser, up->regi_ser, r_stat_din);
	icount = &up->port.icount;
	port = &up->port.state->port;

	do {
		stat_din = REG_RD(ser, up->regi_ser, rs_stat_din);

		flag = TTY_NORMAL;
		ack_intr.dav = 1;
		REG_WR(ser, up->regi_ser, rw_ack_intr, ack_intr);
		icount->rx++;

		if (stat_din.framing_err | stat_din.par_err | stat_din.orun) {
			if (stat_din.data == 0x00 &&
			    stat_din.framing_err) {
				/* Most likely a break. */
				flag = TTY_BREAK;
				icount->brk++;
			} else if (stat_din.par_err) {
				flag = TTY_PARITY;
				icount->parity++;
			} else if (stat_din.orun) {
				flag = TTY_OVERRUN;
				icount->overrun++;
			} else if (stat_din.framing_err) {
				flag = TTY_FRAME;
				icount->frame++;
			}
		}

		/*
		 * If this becomes important, we probably *could* handle this
		 * gracefully by keeping track of the unhandled character.
		 */
		if (!tty_insert_flip_char(port, stat_din.data, flag))
			panic("%s: No tty buffer space", __func__);
		rstat = REG_RD(ser, up->regi_ser, r_stat_din);
	} while (rstat.dav && (max_count-- > 0));
	spin_unlock(&up->port.lock);
	tty_flip_buffer_push(port);
	spin_lock(&up->port.lock);
}
Example no. 14
0
static void handle_tx(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	int sent_tx;
	int tx_count;
	int x;
	unsigned int tf_pointer = 0;

	tx_count = uart_circ_chars_pending(xmit);

	if (tx_count > (UART_XMIT_SIZE - xmit->tail))
		tx_count = UART_XMIT_SIZE - xmit->tail;
	if (tx_count >= port->fifosize)
		tx_count = port->fifosize;

	/* Handle x_char */
	if (port->x_char) {
		wait_for_xmitr(port, UARTDM_ISR_TX_READY_BMSK);
		msm_hsl_write(port, tx_count + 1, UARTDM_NCF_TX_ADDR);
		msm_hsl_write(port, port->x_char, UARTDM_TF_ADDR);
		port->icount.tx++;
		port->x_char = 0;
	} else if (tx_count) {
		wait_for_xmitr(port, UARTDM_ISR_TX_READY_BMSK);
		msm_hsl_write(port, tx_count, UARTDM_NCF_TX_ADDR);
	}
	if (!tx_count) {
		msm_hsl_stop_tx(port);
		return;
	}

	while (tf_pointer < tx_count)  {
		if (unlikely(!(msm_hsl_read(port, UARTDM_SR_ADDR) &
			       UARTDM_SR_TXRDY_BMSK)))
			continue;
		switch (tx_count - tf_pointer) {
		case 1: {
			x = xmit->buf[xmit->tail];
			port->icount.tx++;
			break;
		}
		case 2: {
			x = xmit->buf[xmit->tail]
				| xmit->buf[xmit->tail+1] << 8;
			port->icount.tx += 2;
			break;
		}
		case 3: {
			x = xmit->buf[xmit->tail]
				| xmit->buf[xmit->tail+1] << 8
				| xmit->buf[xmit->tail + 2] << 16;
			port->icount.tx += 3;
			break;
		}
		default: {
			x = *((int *)&(xmit->buf[xmit->tail]));
			port->icount.tx += 4;
			break;
		}
		}
		msm_hsl_write(port, x, UARTDM_TF_ADDR);
		xmit->tail = ((tx_count - tf_pointer < 4) ?
			      (tx_count - tf_pointer + xmit->tail) :
			      (xmit->tail + 4)) & (UART_XMIT_SIZE - 1);
		tf_pointer += 4;
		sent_tx = 1;
	}

	if (uart_circ_empty(xmit))
		msm_hsl_stop_tx(port);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

}
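
handle_tx() feeds the UARTDM TX FIFO one 32-bit word at a time, packing one to four buffer bytes little-endian and telling the hardware via the NCF register how many of them are valid. The helper below sketches that packing step; the ring masking mirrors the switch in the function above, but it is a simplified stand-alone illustration rather than the driver's code.

#include <stdint.h>

#define XMIT_SIZE 4096u   /* power of two (illustrative) */

/* Pack up to four bytes from a circular buffer into one little-endian
 * 32-bit word; 'remaining' is how many valid bytes are left to send. */
static uint32_t pack_tx_word(const unsigned char *buf, unsigned int tail,
			     unsigned int remaining)
{
	uint32_t word = 0;
	unsigned int i, n = (remaining < 4) ? remaining : 4;

	for (i = 0; i < n; i++)
		word |= (uint32_t)buf[(tail + i) & (XMIT_SIZE - 1)] << (8 * i);
	return word;
}
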
Example no. 15
0
/* Handle data sending */
static void max3107_handletx(struct max3107_port *s)
{
	struct circ_buf *xmit = &s->port.state->xmit;
	int i;
	unsigned long flags;
	int len;				/* SPI transfer buffer length */
	u16 *buf;

	if (!s->tx_fifo_empty)
		/* Don't send more data before previous data is sent */
		return;

	if (uart_circ_empty(xmit) || uart_tx_stopped(&s->port))
		/* No data to send or TX is stopped */
		return;

	if (!s->txbuf) {
		dev_warn(&s->spi->dev, "Txbuf isn't ready\n");
		return;
	}
	buf = s->txbuf;
	/* Get length of data pending in circular buffer */
	len = uart_circ_chars_pending(xmit);
	if (len) {
		/* Limit to size of TX FIFO */
		if (len > MAX3107_TX_FIFO_SIZE)
			len = MAX3107_TX_FIFO_SIZE;

		pr_debug("txlen %d\n", len);

		/* Update TX counter */
		s->port.icount.tx += len;

		/* TX FIFO will no longer be empty */
		s->tx_fifo_empty = 0;

		i = 0;
		if (s->irqen_reg & MAX3107_IRQ_TXEMPTY_BIT) {
			/* First disable TX empty interrupt */
			pr_debug("Disabling TE INT\n");
			buf[i] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
			s->irqen_reg &= ~MAX3107_IRQ_TXEMPTY_BIT;
			buf[i] |= s->irqen_reg;
			i++;
			len++;
		}
		/* Add data to send */
		spin_lock_irqsave(&s->port.lock, flags);
		for ( ; i < len ; i++) {
			buf[i] = (MAX3107_WRITE_BIT | MAX3107_THR_REG);
			buf[i] |= ((u16)xmit->buf[xmit->tail] &
						MAX3107_SPI_TX_DATA_MASK);
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		}
		spin_unlock_irqrestore(&s->port.lock, flags);
		if (!(s->irqen_reg & MAX3107_IRQ_TXEMPTY_BIT)) {
			/* Enable TX empty interrupt */
			pr_debug("Enabling TE INT\n");
			buf[i] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
			s->irqen_reg |= MAX3107_IRQ_TXEMPTY_BIT;
			buf[i] |= s->irqen_reg;
			i++;
			len++;
		}
		if (!s->tx_enabled) {
			/* Enable TX */
			pr_debug("Enable TX\n");
			buf[i] = (MAX3107_WRITE_BIT | MAX3107_MODE1_REG);
			spin_lock_irqsave(&s->data_lock, flags);
			s->mode1_reg &= ~MAX3107_MODE1_TXDIS_BIT;
			buf[i] |= s->mode1_reg;
			spin_unlock_irqrestore(&s->data_lock, flags);
			s->tx_enabled = 1;
			i++;
			len++;
		}

		/* Perform the SPI transfer */
		if (max3107_rw(s, (u8 *)buf, NULL, len*2)) {
			dev_err(&s->spi->dev,
				"SPI transfer TX handling failed\n");
			return;
		}
	}

	/* Indicate wake up if circular buffer is getting low on data */
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&s->port);

}
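
max3107_handletx() builds its SPI transfer as an array of 16-bit words, each combining a write bit, a register address, and a data byte, splicing interrupt-enable and mode writes into the same transfer around the FIFO data. The sketch below only illustrates assembling such command words; the bit positions are placeholders and do not reflect the MAX3107 register map.

#include <stdint.h>

#define SPI_WRITE_BIT 0x8000u   /* illustrative layout: write flag ... */
#define SPI_REG_SHIFT 8         /* ... register address in bits 8..14 ... */
#define SPI_DATA_MASK 0x00ffu   /* ... payload byte in bits 0..7 */

static uint16_t spi_word(uint8_t reg, uint8_t data)
{
	return (uint16_t)(SPI_WRITE_BIT | ((uint16_t)reg << SPI_REG_SHIFT) |
			  (data & SPI_DATA_MASK));
}

/* Queue 'len' payload bytes for one register, one command word per byte;
 * returns the number of 16-bit words written to 'out'. */
static unsigned int build_burst(uint16_t *out, uint8_t reg,
				const uint8_t *payload, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		out[i] = spi_word(reg, payload[i]);
	return len;
}
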
Example no. 16
0
/*
 * Transmit characters, refill buffer descriptor, if possible
 */
static int cpm_uart_tx_pump(struct uart_port *port)
{
	cbd_t __iomem *bdp;
	u8 *p;
	int count;
	struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
	struct circ_buf *xmit = &port->info->xmit;

	/* Handle xon/xoff */
	if (port->x_char) {
		/* Pick next descriptor and fill from buffer */
		bdp = pinfo->tx_cur;

		p = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);

		*p++ = port->x_char;

		out_be16(&bdp->cbd_datlen, 1);
		setbits16(&bdp->cbd_sc, BD_SC_READY);
		/* Get next BD. */
		if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
			bdp = pinfo->tx_bd_base;
		else
			bdp++;
		pinfo->tx_cur = bdp;

		port->icount.tx++;
		port->x_char = 0;
		return 1;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		cpm_uart_stop_tx(port);
		return 0;
	}

	/* Pick next descriptor and fill from buffer */
	bdp = pinfo->tx_cur;

	while (!(in_be16(&bdp->cbd_sc) & BD_SC_READY) &&
	       xmit->tail != xmit->head) {
		count = 0;
		p = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
		while (count < pinfo->tx_fifosize) {
			*p++ = xmit->buf[xmit->tail];
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
			port->icount.tx++;
			count++;
			if (xmit->head == xmit->tail)
				break;
		}
		out_be16(&bdp->cbd_datlen, count);
		setbits16(&bdp->cbd_sc, BD_SC_READY);
		/* Get next BD. */
		if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
			bdp = pinfo->tx_bd_base;
		else
			bdp++;
	}
	pinfo->tx_cur = bdp;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit)) {
		cpm_uart_stop_tx(port);
		return 0;
	}

	return 1;
}
Example no. 17
0
static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
{
	struct tty_struct *tty = NULL;
	unsigned int status, ch, flg;
	static struct timeval anomaly_start = { .tv_sec = 0 };

	status = UART_GET_LSR(uart);
	UART_CLEAR_LSR(uart);

 	ch = UART_GET_CHAR(uart);
 	uart->port.icount.rx++;

#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	if (kgdb_connected && kgdboc_port_line == uart->port.line
		&& kgdboc_break_enabled)
		if (ch == 0x3) {/* Ctrl + C */
			kgdb_breakpoint();
			return;
		}

	if (!uart->port.state || !uart->port.state->port.tty)
		return;
#endif
	tty = uart->port.state->port.tty;

	if (ANOMALY_05000363) {
		/* The BF533 (and BF561) family of processors have a nice anomaly
		 * where they continuously generate characters for a "single" break.
		 * We have to basically ignore this flood until the "next" valid
		 * character comes across.  Due to the nature of the flood, it is
		 * not possible to reliably catch bytes that are sent too quickly
		 * after this break.  So application code talking to the Blackfin
		 * which sends a break signal must allow at least 1.5 character
		 * times after the end of the break for things to stabilize.  This
		 * timeout was picked as it must absolutely be larger than 1
		 * character time +/- some percent.  So 1.5 sounds good.  All other
		 * Blackfin families operate properly.  Woo.
		 */
		if (anomaly_start.tv_sec) {
			struct timeval curr;
			suseconds_t usecs;

			if ((~ch & (~ch + 1)) & 0xff)
				goto known_good_char;

			do_gettimeofday(&curr);
			if (curr.tv_sec - anomaly_start.tv_sec > 1)
				goto known_good_char;

			usecs = 0;
			if (curr.tv_sec != anomaly_start.tv_sec)
				usecs += USEC_PER_SEC;
			usecs += curr.tv_usec - anomaly_start.tv_usec;

			if (usecs > UART_GET_ANOMALY_THRESHOLD(uart))
				goto known_good_char;

			if (ch)
				anomaly_start.tv_sec = 0;
			else
				anomaly_start = curr;

			return;

 known_good_char:
			status &= ~BI;
			anomaly_start.tv_sec = 0;
		}
	}

	if (status & BI) {
		if (ANOMALY_05000363)
			if (bfin_revid() < 5)
				do_gettimeofday(&anomaly_start);
		uart->port.icount.brk++;
		if (uart_handle_break(&uart->port))
			goto ignore_char;
		status &= ~(PE | FE);
	}
	if (status & PE)
		uart->port.icount.parity++;
	if (status & OE)
		uart->port.icount.overrun++;
	if (status & FE)
		uart->port.icount.frame++;

	status &= uart->port.read_status_mask;

	if (status & BI)
		flg = TTY_BREAK;
	else if (status & PE)
		flg = TTY_PARITY;
	else if (status & FE)
		flg = TTY_FRAME;
	else
		flg = TTY_NORMAL;

	if (uart_handle_sysrq_char(&uart->port, ch))
		goto ignore_char;

	uart_insert_char(&uart->port, status, OE, ch, flg);

 ignore_char:
	tty_flip_buffer_push(tty);
}

static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
{
	struct circ_buf *xmit = &uart->port.state->xmit;

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
#ifdef CONFIG_BF54x
		/* Clear TFI bit */
		UART_PUT_LSR(uart, TFI);
#endif
		/* Anomaly notes:
		 *  05000215 -	we always clear ETBEI within last UART TX
		 *		interrupt to end a string. It is always set
		 *		when start a new tx.
		 */
		UART_CLEAR_IER(uart, ETBEI);
		return;
	}

	if (uart->port.x_char) {
		UART_PUT_CHAR(uart, uart->port.x_char);
		uart->port.icount.tx++;
		uart->port.x_char = 0;
	}

	while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) {
		UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		uart->port.icount.tx++;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uart->port);
}

static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;

	while (UART_GET_LSR(uart) & DR)
		bfin_serial_rx_chars(uart);

	return IRQ_HANDLED;
}

static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
{
	struct bfin_serial_port *uart = dev_id;

#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
	if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
		uart->scts = 0;
		uart_handle_cts_change(&uart->port, uart->scts);
	}
#endif
	spin_lock(&uart->port.lock);
	if (UART_GET_LSR(uart) & THRE)
		bfin_serial_tx_chars(uart);
	spin_unlock(&uart->port.lock);

	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_SERIAL_BFIN_DMA
static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
{
	struct circ_buf *xmit = &uart->port.state->xmit;

	uart->tx_done = 0;

	if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
		uart->tx_count = 0;
		uart->tx_done = 1;
		return;
	}

	if (uart->port.x_char) {
		UART_PUT_CHAR(uart, uart->port.x_char);
		uart->port.icount.tx++;
		uart->port.x_char = 0;
	}

	uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail))
		uart->tx_count = UART_XMIT_SIZE - xmit->tail;
	blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail),
					(unsigned long)(xmit->buf+xmit->tail+uart->tx_count));
	set_dma_config(uart->tx_dma_channel,
		set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP,
			INTR_ON_BUF,
			DIMENSION_LINEAR,
			DATA_SIZE_8,
			DMA_SYNC_RESTART));
	set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail));
	set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
	set_dma_x_modify(uart->tx_dma_channel, 1);
	SSYNC();
	enable_dma(uart->tx_dma_channel);

	UART_SET_IER(uart, ETBEI);
}

static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
{
	struct tty_struct *tty = uart->port.state->port.tty;
	int i, flg, status;

	status = UART_GET_LSR(uart);
	UART_CLEAR_LSR(uart);

	uart->port.icount.rx +=
		CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail,
		UART_XMIT_SIZE);

	if (status & BI) {
		uart->port.icount.brk++;
		if (uart_handle_break(&uart->port))
			goto dma_ignore_char;
		status &= ~(PE | FE);
	}
	if (status & PE)
		uart->port.icount.parity++;
	if (status & OE)
		uart->port.icount.overrun++;
	if (status & FE)
		uart->port.icount.frame++;

	status &= uart->port.read_status_mask;

	if (status & BI)
		flg = TTY_BREAK;
	else if (status & PE)
		flg = TTY_PARITY;
	else if (status & FE)
		flg = TTY_FRAME;
	else
		flg = TTY_NORMAL;

	for (i = uart->rx_dma_buf.tail; ; i++) {
		if (i >= UART_XMIT_SIZE)
			i = 0;
		if (i == uart->rx_dma_buf.head)
			break;
		if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i]))
			uart_insert_char(&uart->port, status, OE,
				uart->rx_dma_buf.buf[i], flg);
	}

 dma_ignore_char:
	tty_flip_buffer_push(tty);
}

void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
{
	int x_pos, pos;

	dma_disable_irq_nosync(uart->rx_dma_channel);
	spin_lock_bh(&uart->rx_lock);

	/* A 2D DMA RX buffer ring is used. Because curr_y_count and
	 * curr_x_count can't be read as one atomic operation,
	 * curr_y_count should be read before curr_x_count. When
	 * curr_x_count is read, curr_y_count may already point at the
	 * next buffer line, while the position calculated here still
	 * refers to the old line. That wrong position may be smaller
	 * than the current buffer tail, which would cause garbage to
	 * be received if it were not prevented.
	 */
	uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
	x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
	uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
	if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0)
		uart->rx_dma_nrows = 0;
	x_pos = DMA_RX_XCOUNT - x_pos;
	if (x_pos == DMA_RX_XCOUNT)
		x_pos = 0;

	pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
	/* Ignore the received data if the new position is in the same
	 * line as the current buffer tail and is smaller than it.
	 */
	if (pos > uart->rx_dma_buf.tail ||
		uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) {
		uart->rx_dma_buf.head = pos;
		bfin_serial_dma_rx_chars(uart);
		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
	}

	spin_unlock_bh(&uart->rx_lock);
	dma_enable_irq(uart->rx_dma_channel);

	mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
}
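
bfin_serial_rx_dma_timeout() converts the 2D DMA engine's remaining-count registers into a linear position inside the RX ring: completed rows times the row length, plus the completed part of the current row, with both counts wrapping back to zero when a full row or a full ring has been consumed. A standalone sketch of that conversion, with the ring geometry as illustrative constants rather than the driver's DMA_RX_* macros:

#define RX_XCOUNT 512u   /* bytes per DMA row (illustrative) */
#define RX_YCOUNT 8u     /* rows in the ring (illustrative) */

/* The DMA engine reports how much of the current row and column is still
 * left; convert that into the linear offset of the next byte it writes. */
static unsigned int dma_ring_pos(unsigned int y_remaining, unsigned int x_remaining)
{
	unsigned int rows_done = RX_YCOUNT - y_remaining;
	unsigned int x_done = RX_XCOUNT - x_remaining;

	if (rows_done == RX_YCOUNT)
		rows_done = 0;   /* wrapped back to the first row */
	if (x_done == RX_XCOUNT)
		x_done = 0;      /* wrapped back to column 0 */

	return rows_done * RX_XCOUNT + x_done;
}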