Example #1
0
static irqreturn_t interrupt_handler(int irq, void *dev_id)
{
	struct nozomi *dc = dev_id;
	unsigned int a;
	u16 read_iir;

	if (!dc)
		return IRQ_NONE;

	spin_lock(&dc->spin_mutex);
	read_iir = readw(dc->reg_iir);

	/* Card removed */
	if (read_iir == (u16)-1)
		goto none;
	/*
	 * Just handle interrupt enabled in IER
	 * (by masking with dc->last_ier)
	 */
	read_iir &= dc->last_ier;

	if (read_iir == 0)
		goto none;


	DBG4("%s irq:0x%04X, prev:0x%04X", interrupt2str(read_iir), read_iir,
		dc->last_ier);

	if (read_iir & RESET) {
		if (unlikely(!nozomi_read_config_table(dc))) {
			dc->last_ier = 0x0;
			writew(dc->last_ier, dc->reg_ier);
			dev_err(&dc->pdev->dev, "Could not read status from "
				"card, we should disable interface\n");
		} else {
			writew(RESET, dc->reg_fcr);
		}
		/* No more useful info if this was the reset interrupt. */
		goto exit_handler;
	}
	if (read_iir & CTRL_UL) {
		DBG1("CTRL_UL");
		dc->last_ier &= ~CTRL_UL;
		writew(dc->last_ier, dc->reg_ier);
		if (send_flow_control(dc)) {
			writew(CTRL_UL, dc->reg_fcr);
			dc->last_ier = dc->last_ier | CTRL_UL;
			writew(dc->last_ier, dc->reg_ier);
		}
	}
	if (read_iir & CTRL_DL) {
		receive_flow_control(dc);
		writew(CTRL_DL, dc->reg_fcr);
	}
	if (read_iir & MDM_DL) {
		if (!handle_data_dl(dc, PORT_MDM,
				&(dc->port[PORT_MDM].toggle_dl), read_iir,
				MDM_DL1, MDM_DL2)) {
			dev_err(&dc->pdev->dev, "MDM_DL out of sync!\n");
			goto exit_handler;
		}
	}
	if (read_iir & MDM_UL) {
		if (!handle_data_ul(dc, PORT_MDM, read_iir)) {
			dev_err(&dc->pdev->dev, "MDM_UL out of sync!\n");
			goto exit_handler;
		}
	}
	if (read_iir & DIAG_DL) {
		if (!handle_data_dl(dc, PORT_DIAG,
				&(dc->port[PORT_DIAG].toggle_dl), read_iir,
				DIAG_DL1, DIAG_DL2)) {
			dev_err(&dc->pdev->dev, "DIAG_DL out of sync!\n");
			goto exit_handler;
		}
	}
	if (read_iir & DIAG_UL) {
		dc->last_ier &= ~DIAG_UL;
		writew(dc->last_ier, dc->reg_ier);
		if (send_data(PORT_DIAG, dc)) {
			writew(DIAG_UL, dc->reg_fcr);
			dc->last_ier = dc->last_ier | DIAG_UL;
			writew(dc->last_ier, dc->reg_ier);
		}
	}
	if (read_iir & APP1_DL) {
		if (receive_data(PORT_APP1, dc))
			writew(APP1_DL, dc->reg_fcr);
	}
	if (read_iir & APP1_UL) {
		dc->last_ier &= ~APP1_UL;
		writew(dc->last_ier, dc->reg_ier);
		if (send_data(PORT_APP1, dc)) {
			writew(APP1_UL, dc->reg_fcr);
			dc->last_ier = dc->last_ier | APP1_UL;
			writew(dc->last_ier, dc->reg_ier);
		}
	}
	if (read_iir & APP2_DL) {
		if (receive_data(PORT_APP2, dc))
			writew(APP2_DL, dc->reg_fcr);
	}
	if (read_iir & APP2_UL) {
		dc->last_ier &= ~APP2_UL;
		writew(dc->last_ier, dc->reg_ier);
		if (send_data(PORT_APP2, dc)) {
			writew(APP2_UL, dc->reg_fcr);
			dc->last_ier = dc->last_ier | APP2_UL;
			writew(dc->last_ier, dc->reg_ier);
		}
	}

exit_handler:
	spin_unlock(&dc->spin_mutex);

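	/* Push received characters to the tty layer for every port whose
	 * flip bit is set (done outside the spinlock). */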
	for (a = 0; a < NOZOMI_MAX_PORTS; a++)
		if (test_and_clear_bit(a, &dc->flip))
			tty_flip_buffer_push(&dc->port[a].port);

	return IRQ_HANDLED;
none:
	spin_unlock(&dc->spin_mutex);
	return IRQ_NONE;
}
/*
 * Receive characters
 */
static void cpm_uart_int_rx(struct uart_port *port, struct pt_regs *regs)
{
    int i;
    unsigned char ch, *cp;
    struct tty_struct *tty = port->info->tty;
    struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
    volatile cbd_t *bdp;
    u16 status;
    unsigned int flg;

    pr_debug("CPM uart[%d]:RX INT\n", port->line);

    /* Just loop through the closed BDs and copy the characters into
     * the buffer.
     */
    bdp = pinfo->rx_cur;
    for (;;) {
        /* get status */
        status = bdp->cbd_sc;
        /* If this one is empty, return happy */
        if (status & BD_SC_EMPTY)
            break;

        /* get number of characters, and check space in flip-buffer */
        i = bdp->cbd_datlen;

        /* If there is not enough room in the tty flip buffer, try again
         * later, on the next rx-interrupt or a timeout
         */
        if ((tty->flip.count + i) >= TTY_FLIPBUF_SIZE) {
            tty->flip.work.func((void *)tty);
            if ((tty->flip.count + i) >= TTY_FLIPBUF_SIZE) {
                printk(KERN_WARNING "TTY_DONT_FLIP set\n");
                return;
            }
        }

        /* get pointer */
        cp = (unsigned char *)bus_to_virt(bdp->cbd_bufaddr);

        /* loop through the buffer */
        while (i-- > 0) {
            ch = *cp++;
            port->icount.rx++;
            flg = TTY_NORMAL;

            if (status &
                    (BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV))
                goto handle_error;
            if (uart_handle_sysrq_char(port, ch, regs))
                continue;

error_return:
            *tty->flip.char_buf_ptr++ = ch;
            *tty->flip.flag_buf_ptr++ = flg;
            tty->flip.count++;

        }		/* End while (i--) */

        /* This BD is ready to be used again. Clear status. get next */
        bdp->cbd_sc &= ~(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV);
        bdp->cbd_sc |= BD_SC_EMPTY;

        if (bdp->cbd_sc & BD_SC_WRAP)
            bdp = pinfo->rx_bd_base;
        else
            bdp++;
    } /* End for (;;) */

    /* Write back buffer pointer */
    pinfo->rx_cur = (volatile cbd_t *) bdp;

    /* activate BH processing */
    tty_flip_buffer_push(tty);

    return;

    /* Error processing */

handle_error:
    /* Statistics */
    if (status & BD_SC_BR)
        port->icount.brk++;
    if (status & BD_SC_PR)
        port->icount.parity++;
    if (status & BD_SC_FR)
        port->icount.frame++;
    if (status & BD_SC_OV)
        port->icount.overrun++;

    /* Mask out ignored conditions */
    status &= port->read_status_mask;

    /* Handle the remaining ones */
    if (status & BD_SC_BR)
        flg = TTY_BREAK;
    else if (status & BD_SC_PR)
        flg = TTY_PARITY;
    else if (status & BD_SC_FR)
        flg = TTY_FRAME;

    /* overrun does not affect the current character ! */
    if (status & BD_SC_OV) {
        ch = 0;
        flg = TTY_OVERRUN;
        /* Skip the rest of this buffer */
        /* CHECK: is there really nothing useful left in it? */
        /* ASSUMPTION: it contains nothing valid */
        i = 0;
    }
#ifdef SUPPORT_SYSRQ
    port->sysrq = 0;
#endif
    goto error_return;
}
Example #3
0
/**
 * cdns_uart_isr - Interrupt handler
 * @irq: Irq number
 * @dev_id: Id of the port
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
{
	struct uart_port *port = (struct uart_port *)dev_id;
	unsigned long flags;
	unsigned int isrstatus, numbytes;
	unsigned int data;
	char status = TTY_NORMAL;

	spin_lock_irqsave(&port->lock, flags);

	/* Read the interrupt status register to determine which
	 * interrupt(s) is/are active.
	 */
	isrstatus = cdns_uart_readl(CDNS_UART_ISR_OFFSET);

	/*
	 * There is no hardware break detection, so we interpret framing
	 * error with all-zeros data as a break sequence. Most of the time,
	 * there's another non-zero byte at the end of the sequence.
	 */
	if (isrstatus & CDNS_UART_IXR_FRAMING) {
		while (!(cdns_uart_readl(CDNS_UART_SR_OFFSET) &
					CDNS_UART_SR_RXEMPTY)) {
			if (!cdns_uart_readl(CDNS_UART_FIFO_OFFSET)) {
				port->read_status_mask |= CDNS_UART_IXR_BRK;
				isrstatus &= ~CDNS_UART_IXR_FRAMING;
			}
		}
		cdns_uart_writel(CDNS_UART_IXR_FRAMING, CDNS_UART_ISR_OFFSET);
	}

	/* drop byte with parity error if IGNPAR specified */
	if (isrstatus & port->ignore_status_mask & CDNS_UART_IXR_PARITY)
		isrstatus &= ~(CDNS_UART_IXR_RXTRIG | CDNS_UART_IXR_TOUT);

	isrstatus &= port->read_status_mask;
	isrstatus &= ~port->ignore_status_mask;

	if ((isrstatus & CDNS_UART_IXR_TOUT) ||
		(isrstatus & CDNS_UART_IXR_RXTRIG)) {
		/* Receive Timeout Interrupt */
		while ((cdns_uart_readl(CDNS_UART_SR_OFFSET) &
			CDNS_UART_SR_RXEMPTY) != CDNS_UART_SR_RXEMPTY) {
			data = cdns_uart_readl(CDNS_UART_FIFO_OFFSET);

			/* Non-NULL byte after BREAK is garbage (99%) */
			if (data && (port->read_status_mask &
						CDNS_UART_IXR_BRK)) {
				port->read_status_mask &= ~CDNS_UART_IXR_BRK;
				port->icount.brk++;
				if (uart_handle_break(port))
					continue;
			}

#ifdef SUPPORT_SYSRQ
			/*
			 * uart_handle_sysrq_char() doesn't work if
			 * spinlocked, for some reason
			 */
			 if (port->sysrq) {
				spin_unlock(&port->lock);
				if (uart_handle_sysrq_char(port,
							(unsigned char)data)) {
					spin_lock(&port->lock);
					continue;
				}
				spin_lock(&port->lock);
			}
#endif

			port->icount.rx++;

			if (isrstatus & CDNS_UART_IXR_PARITY) {
				port->icount.parity++;
				status = TTY_PARITY;
			} else if (isrstatus & CDNS_UART_IXR_FRAMING) {
				port->icount.frame++;
				status = TTY_FRAME;
			} else if (isrstatus & CDNS_UART_IXR_OVERRUN) {
				port->icount.overrun++;
			}

			uart_insert_char(port, isrstatus, CDNS_UART_IXR_OVERRUN,
					data, status);
		}
		spin_unlock(&port->lock);
		tty_flip_buffer_push(&port->state->port);
		spin_lock(&port->lock);
	}

	/* Dispatch an appropriate handler */
	if ((isrstatus & CDNS_UART_IXR_TXEMPTY) == CDNS_UART_IXR_TXEMPTY) {
		if (uart_circ_empty(&port->state->xmit)) {
			cdns_uart_writel(CDNS_UART_IXR_TXEMPTY,
						CDNS_UART_IDR_OFFSET);
		} else {
			numbytes = port->fifosize;
			/* Break if no more data available in the UART buffer */
			while (numbytes--) {
				if (uart_circ_empty(&port->state->xmit))
					break;
				/* Get the data from the UART circular buffer
				 * and write it to the cdns_uart's TX_FIFO
				 * register.
				 */
				cdns_uart_writel(
					port->state->xmit.buf[port->state->xmit.
					tail], CDNS_UART_FIFO_OFFSET);

				port->icount.tx++;

				/* Adjust the tail of the UART buffer and wrap
				 * the buffer if it reaches limit.
				 */
				port->state->xmit.tail =
					(port->state->xmit.tail + 1) &
						(UART_XMIT_SIZE - 1);
			}

			if (uart_circ_chars_pending(
					&port->state->xmit) < WAKEUP_CHARS)
				uart_write_wakeup(port);
		}
	}

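	/* Writing the handled status bits back to the ISR acknowledges
	 * (clears) them. */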
	cdns_uart_writel(isrstatus, CDNS_UART_ISR_OFFSET);

	/* be sure to release the lock and tty before leaving */
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}
/*
 * Interrupt routine, called from Ingo's I/O layer
 */
static void raw3215_irq(int irq, void *int_parm, struct pt_regs *regs)
{
	raw3215_info *raw;
	raw3215_req *req;
	struct tty_struct *tty;
	devstat_t *stat;
        int cstat, dstat;
	int count, slen;

	stat = (devstat_t *) int_parm;
	req = (raw3215_req *) stat->intparm;
	cstat = stat->cstat;
	dstat = stat->dstat;
	if (cstat != 0) {
		raw = raw3215_find_info(irq);
		if (raw != NULL) {
			raw->message = KERN_WARNING
				"Got nonzero channel status in raw3215_irq "
				"(dev %i, dev sts 0x%2x, sch sts 0x%2x)";
			raw->msg_dstat = dstat;
			raw->msg_cstat = cstat;
                        raw3215_sched_bh(raw);
		}
	}
        if (dstat & 0x01) { /* we got a unit exception */
		dstat &= ~0x01;  /* we can ignore it */
        }
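	/* Dispatch on the remaining device-status bits */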
	switch (dstat) {
	case 0x80:
		if (cstat != 0)
			break;
		/* Attention interrupt, someone hit the enter key */
		if ((raw = raw3215_find_info(irq)) == NULL)
			return;              /* That shouldn't happen ... */
		/* Setup a read request */
		raw3215_mk_read_req(raw);
                if (MACHINE_IS_P390)
                        memset(raw->inbuf, 0, RAW3215_INBUF_SIZE);
                raw3215_sched_bh(raw);
		break;
	case 0x08:
	case 0x0C:
		/* Channel end interrupt. */
		if ((raw = req->info) == NULL)
                        return;              /* That shouldn't happen ... */
		if (req->type == RAW3215_READ) {
			/* store residual count, then wait for device end */
			req->residual = stat->rescnt;
		}
		if (dstat == 0x08)
			break;
	case 0x04:
		/* Device end interrupt. */
                if ((raw = req->info) == NULL)
                        return;              /* That shouldn't happen ... */
		if (req->type == RAW3215_READ && raw->tty != NULL) {
			unsigned int cchar;

			tty = raw->tty;
			count = 160 - req->residual;
			if (MACHINE_IS_P390) {
				slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE);
				if (count > slen)
					count = slen;
			} else
			if (count >= TTY_FLIPBUF_SIZE - tty->flip.count)
				count = TTY_FLIPBUF_SIZE - tty->flip.count - 1;
			EBCASC(raw->inbuf, count);
			cchar = ctrlchar_handle(raw->inbuf, count, tty);
			switch (cchar & CTRLCHAR_MASK) {
			case CTRLCHAR_SYSRQ:
				break;

			case CTRLCHAR_CTRL:
				tty->flip.count++;
				*tty->flip.flag_buf_ptr++ = TTY_NORMAL;
				*tty->flip.char_buf_ptr++ = cchar;
				tty_flip_buffer_push(raw->tty);
				break;

			case CTRLCHAR_NONE:
				memcpy(tty->flip.char_buf_ptr,
				       raw->inbuf, count);
				if (count < 2 ||
				    (strncmp(raw->inbuf+count-2, "^n", 2) && 
				    strncmp(raw->inbuf+count-2, "\252n", 2)) ) {
					/* don't add the auto \n */
					tty->flip.char_buf_ptr[count] = '\n';
					memset(tty->flip.flag_buf_ptr,
					       TTY_NORMAL, count + 1);
					count++;
				} else
					count-=2;
				tty->flip.char_buf_ptr += count;
				tty->flip.flag_buf_ptr += count;
				tty->flip.count += count;
				tty_flip_buffer_push(raw->tty);
				break;
			}
		} else if (req->type == RAW3215_WRITE) {
			raw->count -= req->len;
                        raw->written -= req->len;
		} 
		raw->flags &= ~RAW3215_WORKING;
		raw3215_free_req(req);
		/* check for empty wait */
		if (waitqueue_active(&raw->empty_wait) &&
		    raw->queued_write == NULL &&
		    raw->queued_read == NULL) {
			wake_up_interruptible(&raw->empty_wait);
		}
                raw3215_sched_bh(raw);
		break;
	default:
		/* Strange interrupt, I'll do my best to clean up */
                if ((raw = raw3215_find_info(irq)) == NULL)
                        return;              /* That shouldn't happen ... */
                if (raw == NULL) break;
		if (req != NULL && req->type != RAW3215_FREE) {
		        if (req->type == RAW3215_WRITE) {
			        raw->count -= req->len;
                                raw->written -= req->len;
                        }
                        raw->flags &= ~RAW3215_WORKING;
                        raw3215_free_req(req);
		}
		raw->message = KERN_WARNING
			"Spurious interrupt in in raw3215_irq "
			"(dev %i, dev sts 0x%2x, sch sts 0x%2x)";
		raw->msg_dstat = dstat;
		raw->msg_cstat = cstat;
                raw3215_sched_bh(raw);
	}
	return;
}
Example #5
0
static void symbol_int_callback(struct urb *urb)
{
    struct symbol_private *priv = urb->context;
    unsigned char *data = urb->transfer_buffer;
    struct usb_serial_port *port = priv->port;
    int status = urb->status;
    struct tty_struct *tty;
    int result;
    int available_room = 0;
    int data_length;

    dbg("%s - port %d", __func__, port->number);

    switch (status) {
    case 0:

        break;
    case -ECONNRESET:
    case -ENOENT:
    case -ESHUTDOWN:

        dbg("%s - urb shutting down with status: %d",
            __func__, status);
        return;
    default:
        dbg("%s - nonzero urb status received: %d",
            __func__, status);
        goto exit;
    }

    usb_serial_debug_data(debug, &port->dev, __func__, urb->actual_length,
                          data);

    if (urb->actual_length > 1) {
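        /* Skip the leading byte (presumably a length/status header from
         * the device) and push only the payload starting at data[1]. */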
        data_length = urb->actual_length - 1;


        tty = tty_port_tty_get(&port->port);
        if (tty) {
            available_room = tty_buffer_request_room(tty,
                             data_length);
            if (available_room) {
                tty_insert_flip_string(tty, &data[1],
                                       available_room);
                tty_flip_buffer_push(tty);
            }
            tty_kref_put(tty);
        }
    } else {
        dev_dbg(&priv->udev->dev,
                "Improper ammount of data received from the device, "
                "%d bytes", urb->actual_length);
    }

exit:
    spin_lock(&priv->lock);


    if (!priv->throttled) {
        usb_fill_int_urb(priv->int_urb, priv->udev,
                         usb_rcvintpipe(priv->udev,
                                        priv->int_address),
                         priv->int_buffer, priv->buffer_size,
                         symbol_int_callback, priv, priv->bInterval);
        result = usb_submit_urb(priv->int_urb, GFP_ATOMIC);
        if (result)
            dev_err(&port->dev,
                    "%s - failed resubmitting read urb, error %d\n",
                    __func__, result);
    } else
        priv->actually_throttled = true;
    spin_unlock(&priv->lock);
}
Example #6
0
static inline void sci_receive_chars(struct uart_port *port)
{
	struct sci_port *sci_port = (struct sci_port *)port;
	struct tty_struct *tty = port->info->port.tty;
	int i, count, copied = 0;
	unsigned short status;
	unsigned char flag;

	status = sci_in(port, SCxSR);
	if (!(status & SCxSR_RDxF(port)))
		return;

	while (1) {
		if (port->type == PORT_SCIF)
			count = scif_rxroom(port);
		else
			count = sci_rxroom(port);

		/* Don't copy more bytes than there is room for in the buffer */
		count = tty_buffer_request_room(tty, count);

		/* If for any reason we can't copy more data, we're done! */
		if (count == 0)
			break;

		if (port->type == PORT_SCI) {
			char c = sci_in(port, SCxRDR);
			if (uart_handle_sysrq_char(port, c) || sci_port->break_flag)
				count = 0;
			else {
				tty_insert_flip_char(tty, c, TTY_NORMAL);
			}
		} else {
			for (i=0; i<count; i++) {
				char c = sci_in(port, SCxRDR);
				status = sci_in(port, SCxSR);
#if defined(CONFIG_CPU_SH3)
				/* Skip "chars" during break */
				if (sci_port->break_flag) {
					if ((c == 0) &&
					    (status & SCxSR_FER(port))) {
						count--; i--;
						continue;
					}

					/* Nonzero => end-of-break */
					pr_debug("scif: debounce<%02x>\n", c);
					sci_port->break_flag = 0;

					if (STEPFN(c)) {
						count--; i--;
						continue;
					}
				}
#endif /* CONFIG_CPU_SH3 */
				if (uart_handle_sysrq_char(port, c)) {
					count--; i--;
					continue;
				}

				/* Store data and status */
				if (status&SCxSR_FER(port)) {
					flag = TTY_FRAME;
					pr_debug("sci: frame error\n");
				} else if (status&SCxSR_PER(port)) {
					flag = TTY_PARITY;
					pr_debug("sci: parity error\n");
				} else
					flag = TTY_NORMAL;
				tty_insert_flip_char(tty, c, flag);
			}
		}

		sci_in(port, SCxSR); /* dummy read */
		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));

		copied += count;
		port->icount.rx += count;
	}

	if (copied) {
		/* Tell the rest of the system the news. New characters! */
		tty_flip_buffer_push(tty);
	} else {
		sci_in(port, SCxSR); /* dummy read */
		sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
	}
}
static void ccci_tty_read(unsigned long arg)
{
    int             part, size, accept, ret;
    unsigned        read, write;
    tty_instance_t *tty_instance = (tty_instance_t *) arg;

    if (tty_instance->tty == NULL) {
        has_pending_read = 1;
        CCCI_MSG_INF("tty", "NULL tty @ read\n");
        return;
    }
    else if ((tty_instance->tty->index == CCCI_TTY_MODEM) && (is_meta_mode()||is_advanced_meta_mode())) {
        //  Do not touch the modem channel while in META mode.
        //  Otherwise, the modem firmware will crash.

        CCCI_MSG_INF("tty", "Attempted read from modem while in meta mode\n");     
        return;
    }
    

    read  = tty_instance->shared_mem->rx_control.read;
    write = tty_instance->shared_mem->rx_control.write; 
    size  = write - read;

    /* ALPS00241537: if there is no data in shared memory, do not copy anything or send a message to the MD. */
    /* The total usable size is (length - 1), which is handled in the MD write API, so size == 0 can only mean the buffer is empty. */
    if(size == 0) {
        //CCCI_MSG_INF("tty", "ttyC%d share memory is empty! \n", tty_instance->tty->index);
        return;
    }
    
    if (size < 0) {
        size += tty_instance->shared_mem->rx_control.length;
    }

	if(tty_debug_enable & (1UL << tty_instance->tty->index))
		CCCI_MSG_INF("tty", "[before Read]:[RX] tty=%04d data_len=%04d write=%04d read=%04d \n",
         tty_instance->tty->index, size, write, read); 
	

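    /* read > write means the readable data wraps around the end of the
     * ring buffer, so push the tail part (read .. length - 1) first. */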
    if (read > write) {
        part = tty_instance->shared_mem->rx_control.length - read;
        memcpy(tty_instance->flip_string, &tty_instance->shared_mem->rx_buffer[read], part);          
        accept = tty_insert_flip_string(tty_instance->tty, tty_instance->flip_string, part);

        if (accept < part) {
            size -= accept;
            read += accept;

            goto __ccci_read_ack;
        }
        else {
            size -= part;
            read  = 0;
        }
    }

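    /* Copy the remaining contiguous region starting at 'read'. */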
    memcpy(tty_instance->flip_string, &tty_instance->shared_mem->rx_buffer[read], size);
    accept = tty_insert_flip_string(tty_instance->tty, tty_instance->flip_string, size);

    if (accept < size) {
        size -= accept;
        read += accept;
    }
    else {
        size  = 0;
        read += accept;
    }
    
  __ccci_read_ack:
    
    tty_instance->shared_mem->rx_control.read = read;
    
    ret = ccci_write_mailbox(tty_instance->uart_rx_ack, tty_instance->channel);
    if (ret != CCCI_SUCCESS) {
        CCCI_MSG_INF("tty", "ccci_write_mailbox for %d fail: %d\n",
               tty_instance->tty->index, ret);
        ccci_channel_status(tty_instance->uart_rx_ack);
		
		// axs: assert masked out because it would trigger a device reboot
        //ASSERT(0);
    }

   if(tty_debug_enable & (1UL << tty_instance->tty->index))
		CCCI_MSG_INF("tty", "[after  Read]:[RX] tty=%04d data_len=%04d write=%04d read=%4d\n",
			tty_instance->tty->index, accept, tty_instance->shared_mem->rx_control.write, 
		    tty_instance->shared_mem->rx_control.read);        
    
    wake_lock_timeout(&tty_instance->wake_lock, HZ / 2);
    tty_flip_buffer_push(tty_instance->tty);
}
Example #8
0
/* Modem_response command */
static int lge_dm_tty_modem_response(struct dm_tty *lge_dm_tty_drv,
			const unsigned char *buf, int count)
{
	int num_push = 0;
	int left = 0;
	int total_push = 0;

	struct timeval time;
	int start_flag_length;
	int end_flag_length;

	if (count == 0)
		return 0;

	if(lge_dm_tty_drv->logging_mode == DM_APP_SDM)
	{

		/* make start flag */
		memcpy(dm_modem_response, &dm_rx_start_flag,
			sizeof(dm_rx_start_flag));

		start_flag_length = sizeof(dm_rx_start_flag);

		/* make header */
		dm_modem_response_header->dm_router_size =
			dm_modem_response_header_length +
				dm_modem_response_body_length + count;

		memcpy(dm_modem_response + start_flag_length,
			dm_modem_response_header,
				dm_modem_response_header_length);

		/* make body */
		dm_modem_response_body->modem_chip = Primary_modem_chip;

		do_gettimeofday(&time);

		memcpy(&(dm_modem_response_body->local_time), &time,
			sizeof(struct timeval));

		memcpy(dm_modem_response + start_flag_length +
			dm_modem_response_header_length,
				dm_modem_response_body,
					dm_modem_response_body_length);

		if(buf != NULL){
		memcpy(dm_modem_response + start_flag_length +
			dm_modem_response_header_length +
				dm_modem_response_body_length, buf, count);
		}else{
			printk("[DM_APP]buf is null!\n");
		}

		dm_modem_response_length =
			dm_modem_response_header->dm_router_size +
				start_flag_length;

		/* make end flag */
		memcpy(dm_modem_response + dm_modem_response_length,
			&dm_rx_end_flag, sizeof(dm_rx_end_flag));

		end_flag_length = sizeof(dm_rx_end_flag);

		dm_modem_response_length = dm_modem_response_length +
			end_flag_length;

		/* send modem_response packet to DM router */
		total_push = 0;
		left = dm_modem_response_length;

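		/* Push the assembled packet into the tty flip buffer, looping
		 * until every byte has been accepted. */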
		do {
			num_push = tty_insert_flip_string(lge_dm_tty_drv->tty_str,
				dm_modem_response + total_push, left);
			total_push += num_push;
			left -= num_push;
			tty_flip_buffer_push(lge_dm_tty_drv->tty_str);
		} while (left != 0);
	}
	else if(lge_dm_tty_drv->logging_mode == DM_APP_ODM)	
	{
		total_push = 0;
		left = count;

		do {
		num_push = tty_insert_flip_string(lge_dm_tty_drv->tty_str,
			buf + total_push, left);
		total_push += num_push;
		left -= num_push;
		tty_flip_buffer_push(lge_dm_tty_drv->tty_str);
		} while (left != 0);
	}

	return total_push;
}
Example #9
0
static int sprd_rx_dma_config(struct uart_port *port, u32 burst)
{
	struct sprd_uart_port *sp =
		container_of(port, struct sprd_uart_port, port);
	struct dma_slave_config cfg = {
		.src_addr = port->mapbase + SPRD_RXD,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst = burst,
	};

	return dmaengine_slave_config(sp->rx_dma.chn, &cfg);
}

static void sprd_uart_dma_rx(struct uart_port *port)
{
	struct sprd_uart_port *sp =
		container_of(port, struct sprd_uart_port, port);
	struct tty_port *tty = &port->state->port;

	port->icount.rx += sp->rx_dma.trans_len;
	tty_insert_flip_string(tty, sp->rx_buf_tail, sp->rx_dma.trans_len);
	tty_flip_buffer_push(tty);
}

static void sprd_uart_dma_irq(struct uart_port *port)
{
	struct sprd_uart_port *sp =
		container_of(port, struct sprd_uart_port, port);
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(sp->rx_dma.chn,
				     sp->rx_dma.cookie, &state);
	if (status == DMA_ERROR)
		sprd_stop_rx(port);

	if (!state.residue && sp->pos == sp->rx_dma.phys_addr)
		return;

	if (!state.residue) {
		sp->rx_dma.trans_len = SPRD_UART_RX_SIZE +
			sp->rx_dma.phys_addr - sp->pos;
		sp->pos = sp->rx_dma.phys_addr;
	} else {
		sp->rx_dma.trans_len = state.residue - sp->pos;
		sp->pos = state.residue;
	}

	sprd_uart_dma_rx(port);
	sp->rx_buf_tail += sp->rx_dma.trans_len;
}

static void sprd_complete_rx_dma(void *data)
{
	struct uart_port *port = (struct uart_port *)data;
	struct sprd_uart_port *sp =
		container_of(port, struct sprd_uart_port, port);
	struct dma_tx_state state;
	enum dma_status status;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);

	status = dmaengine_tx_status(sp->rx_dma.chn,
				     sp->rx_dma.cookie, &state);
	if (status != DMA_COMPLETE) {
		sprd_stop_rx(port);
		spin_unlock_irqrestore(&port->lock, flags);
		return;
	}

	if (sp->pos != sp->rx_dma.phys_addr) {
		sp->rx_dma.trans_len =  SPRD_UART_RX_SIZE +
			sp->rx_dma.phys_addr - sp->pos;
		sprd_uart_dma_rx(port);
		sp->rx_buf_tail += sp->rx_dma.trans_len;
	}

	if (sprd_start_dma_rx(port))
		sprd_stop_rx(port);

	spin_unlock_irqrestore(&port->lock, flags);
}

static int sprd_start_dma_rx(struct uart_port *port)
{
	struct sprd_uart_port *sp =
		container_of(port, struct sprd_uart_port, port);
	int ret;

	if (!sp->rx_dma.enable)
		return 0;

	sp->pos = sp->rx_dma.phys_addr;
	sp->rx_buf_tail = sp->rx_dma.virt;
	sprd_rx_full_thld(port, SPRD_RX_FIFO_FULL);
	ret = sprd_rx_dma_config(port, SPRD_RX_DMA_STEP);
	if (ret)
		return ret;

	return sprd_uart_dma_submit(port, &sp->rx_dma, SPRD_UART_RX_SIZE,
				    DMA_DEV_TO_MEM, sprd_complete_rx_dma);
}

static void sprd_release_dma(struct uart_port *port)
{
	struct sprd_uart_port *sp =
		container_of(port, struct sprd_uart_port, port);

	sprd_uart_dma_enable(port, false);

	if (sp->rx_dma.enable)
		dma_release_channel(sp->rx_dma.chn);

	if (sp->tx_dma.enable)
		dma_release_channel(sp->tx_dma.chn);

	sp->tx_dma.enable = false;
	sp->rx_dma.enable = false;
}

static void sprd_request_dma(struct uart_port *port)
{
	struct sprd_uart_port *sp =
		container_of(port, struct sprd_uart_port, port);

	sp->tx_dma.enable = true;
	sp->rx_dma.enable = true;

	sp->tx_dma.chn = dma_request_chan(port->dev, "tx");
	if (IS_ERR(sp->tx_dma.chn)) {
		dev_err(port->dev, "request TX DMA channel failed, ret = %ld\n",
			PTR_ERR(sp->tx_dma.chn));
		sp->tx_dma.enable = false;
	}

	sp->rx_dma.chn = dma_request_chan(port->dev, "rx");
	if (IS_ERR(sp->rx_dma.chn)) {
		dev_err(port->dev, "request RX DMA channel failed, ret = %ld\n",
			PTR_ERR(sp->rx_dma.chn));
		sp->rx_dma.enable = false;
	}
}

static void sprd_stop_tx(struct uart_port *port)
{
	struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port,
						 port);
	unsigned int ien, iclr;

	if (sp->tx_dma.enable) {
		sprd_stop_tx_dma(port);
		return;
	}

	iclr = serial_in(port, SPRD_ICLR);
	ien = serial_in(port, SPRD_IEN);

	iclr |= SPRD_IEN_TX_EMPTY;
	ien &= ~SPRD_IEN_TX_EMPTY;

	serial_out(port, SPRD_IEN, ien);
	serial_out(port, SPRD_ICLR, iclr);
}

static void sprd_start_tx(struct uart_port *port)
{
	struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port,
						 port);
	unsigned int ien;

	if (sp->tx_dma.enable) {
		sprd_start_tx_dma(port);
		return;
	}

	ien = serial_in(port, SPRD_IEN);
	if (!(ien & SPRD_IEN_TX_EMPTY)) {
		ien |= SPRD_IEN_TX_EMPTY;
		serial_out(port, SPRD_IEN, ien);
	}
}

/* The Sprd serial does not support this function. */
static void sprd_break_ctl(struct uart_port *port, int break_state)
{
	/* nothing to do */
}

static int handle_lsr_errors(struct uart_port *port,
			     unsigned int *flag,
			     unsigned int *lsr)
{
	int ret = 0;

	/* statistics */
	if (*lsr & SPRD_LSR_BI) {
		*lsr &= ~(SPRD_LSR_FE | SPRD_LSR_PE);
		port->icount.brk++;
		ret = uart_handle_break(port);
		if (ret)
			return ret;
	} else if (*lsr & SPRD_LSR_PE)
		port->icount.parity++;
	else if (*lsr & SPRD_LSR_FE)
		port->icount.frame++;
	if (*lsr & SPRD_LSR_OE)
		port->icount.overrun++;

	/* mask off conditions which should be ignored */
	*lsr &= port->read_status_mask;
	if (*lsr & SPRD_LSR_BI)
		*flag = TTY_BREAK;
	else if (*lsr & SPRD_LSR_PE)
		*flag = TTY_PARITY;
	else if (*lsr & SPRD_LSR_FE)
		*flag = TTY_FRAME;

	return ret;
}
Example #10
0
/* Handle data receiving */
static void max3107_handlerx(struct max3107_port *s, u16 rxlvl)
{
	int i;
	int j;
	int len;				/* SPI transfer buffer length */
	u16 *buf;
	u8 *valid_str;

	if (!s->rx_enabled)
		/* RX is disabled */
		return;

	if (rxlvl == 0) {
		/* RX fifo is empty */
		return;
	} else if (rxlvl >= MAX3107_RX_FIFO_SIZE) {
		dev_warn(&s->spi->dev, "Possible RX FIFO overrun %d\n", rxlvl);
		/* Ensure sanity of RX level */
		rxlvl = MAX3107_RX_FIFO_SIZE;
	}
	if (!s->rxbuf || !s->rxstr) {
		dev_warn(&s->spi->dev, "Rx buffer/str isn't ready\n");
		return;
	}
	buf = s->rxbuf;
	valid_str = s->rxstr;
	while (rxlvl) {
		pr_debug("rxlvl %d\n", rxlvl);
		/* Clear buffer */
		memset(buf, 0, sizeof(u16) * (MAX3107_RX_FIFO_SIZE + 2));
		len = 0;
		if (s->irqen_reg & MAX3107_IRQ_RXFIFO_BIT) {
			/* First disable RX FIFO interrupt */
			pr_debug("Disabling RX INT\n");
			buf[0] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
			s->irqen_reg &= ~MAX3107_IRQ_RXFIFO_BIT;
			buf[0] |= s->irqen_reg;
			len++;
		}
		/* Just increase the length by amount of words in FIFO since
		 * buffer was zeroed and SPI transfer of 0x0000 means reading
		 * from RX FIFO
		 */
		len += rxlvl;
		/* Append RX level query */
		buf[len] = MAX3107_RXFIFOLVL_REG;
		len++;

		/* Perform the SPI transfer */
		if (max3107_rw(s, (u8 *)buf, (u8 *)buf, len * 2)) {
			dev_err(&s->spi->dev, "SPI transfer for RX h failed\n");
			return;
		}

		/* Skip RX FIFO interrupt disabling word if it was added */
		j = ((len - 1) - rxlvl);
		/* Read received words */
		for (i = 0; i < rxlvl; i++, j++)
			valid_str[i] = (u8)buf[j];
		put_data_to_circ_buf(s, valid_str, rxlvl);
		/* Get new RX level */
		rxlvl = (buf[len - 1] & MAX3107_SPI_RX_DATA_MASK);
	}

	if (s->rx_enabled) {
		/* RX still enabled, re-enable RX FIFO interrupt */
		pr_debug("Enabling RX INT\n");
		buf[0] = (MAX3107_WRITE_BIT | MAX3107_IRQEN_REG);
		s->irqen_reg |= MAX3107_IRQ_RXFIFO_BIT;
		buf[0] |= s->irqen_reg;
		if (max3107_rw(s, (u8 *)buf, NULL, 2))
			dev_err(&s->spi->dev, "RX FIFO INT enabling failed\n");
	}

	/* Push the received data to receivers */
	if (s->port.state->port.tty)
		tty_flip_buffer_push(s->port.state->port.tty);
}
static void mct_u232_read_int_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct mct_u232_private *priv = usb_get_serial_port_data(port);
	struct usb_serial *serial = port->serial;
	struct tty_struct *tty;
	unsigned char *data = urb->transfer_buffer;
	int retval;
	int status = urb->status;
	unsigned long flags;

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dbg("%s - urb shutting down with status: %d",
		    __func__, status);
		return;
	default:
		dbg("%s - nonzero urb status received: %d",
		    __func__, status);
		goto exit;
	}

	if (!serial) {
		dbg("%s - bad serial pointer, exiting", __func__);
		return;
	}

	dbg("%s - port %d", __func__, port->number);
	usb_serial_debug_data(debug, &port->dev, __func__,
					urb->actual_length, data);

	/*
	 * Workaround: handle the 'usual' bulk-in pipe here
	 */
	if (urb->transfer_buffer_length > 2) {
		tty = tty_port_tty_get(&port->port);
		if (urb->actual_length) {
			tty_insert_flip_string(tty, data, urb->actual_length);
			tty_flip_buffer_push(tty);
			tty_kref_put(tty);
		}
		goto exit;
	}

	/*
	 * The interrupt-in pipe signals exceptional conditions (modem line
	 * signal changes and errors). data[0] holds MSR, data[1] holds LSR.
	 */
	spin_lock_irqsave(&priv->lock, flags);
	priv->last_msr = data[MCT_U232_MSR_INDEX];

	/* Record Control Line states */
	mct_u232_msr_to_state(&priv->control_state, priv->last_msr);

#if 0
	/* Not yet handled. See belkin_sa.c for further information */
	/* Now to report any errors */
	priv->last_lsr = data[MCT_U232_LSR_INDEX];
	/*
	 * fill in the flip buffer here, but I do not know the relation
	 * to the current/next receive buffer or characters.  I need
	 * to look in to this before committing any code.
	 */
	if (priv->last_lsr & MCT_U232_LSR_ERR) {
		tty = tty_port_tty_get(&port->port);
		/* Overrun Error */
		if (priv->last_lsr & MCT_U232_LSR_OE) {
		}
		/* Parity Error */
		if (priv->last_lsr & MCT_U232_LSR_PE) {
		}
		/* Framing Error */
		if (priv->last_lsr & MCT_U232_LSR_FE) {
		}
		/* Break Indicator */
		if (priv->last_lsr & MCT_U232_LSR_BI) {
		}
		tty_kref_put(tty);
	}
#endif
	spin_unlock_irqrestore(&priv->lock, flags);
exit:
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&port->dev,
			"%s - usb_submit_urb failed with result %d\n",
			__func__, retval);
} /* mct_u232_read_int_callback */
Example #12
0
static void acm_rx_tasklet(unsigned long _acm)
{
	struct acm *acm = (void *)_acm;
	struct acm_rb *buf;
	struct tty_struct *tty;
	struct acm_ru *rcv;
	unsigned long flags;
	unsigned char throttled;
	int copied;

	dbg("Entering acm_rx_tasklet");

	if (!ACM_READY(acm)) {
		dbg("acm_rx_tasklet: ACM not ready");
		return;
	}

	spin_lock_irqsave(&acm->throttle_lock, flags);
	throttled = acm->throttle;
	spin_unlock_irqrestore(&acm->throttle_lock, flags);
	if (throttled) {
		dbg("acm_rx_tasklet: throttled");
		return;
	}

	tty = tty_port_tty_get(&acm->port);

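	/* Process the filled read buffers, copying their contents to the tty. */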
next_buffer:
	spin_lock_irqsave(&acm->read_lock, flags);
	if (list_empty(&acm->filled_read_bufs)) {
		spin_unlock_irqrestore(&acm->read_lock, flags);
		goto urbs;
	}
	buf = list_entry(acm->filled_read_bufs.next,
			 struct acm_rb, list);
	list_del(&buf->list);
	spin_unlock_irqrestore(&acm->read_lock, flags);

	dbg("acm_rx_tasklet: procesing buf 0x%p, size = %d", buf, buf->size);

	copied = 0;
	if (tty) {
		spin_lock_irqsave(&acm->throttle_lock, flags);
		throttled = acm->throttle;
		spin_unlock_irqrestore(&acm->throttle_lock, flags);
		if (!throttled) {
			copied = tty_insert_flip_string(tty, buf->base,
				buf->size);
			tty_flip_buffer_push(tty);
			if (copied != buf->size)
				dbg("%s: copied %d != buf->size %d!!!!!!\n",
					__func__, copied, buf->size);
			else
				dbg("%s: copied %d == buf->size %d\n",
					__func__, copied, buf->size);
		} else {
			tty_kref_put(tty);
			dbg("Throttling noticed");
			spin_lock_irqsave(&acm->read_lock, flags);
			list_add(&buf->list, &acm->filled_read_bufs);
			spin_unlock_irqrestore(&acm->read_lock, flags);
			return;
		}
	}

	if (copied == buf->size || !tty) {
		spin_lock_irqsave(&acm->read_lock, flags);
		list_add(&buf->list, &acm->spare_read_bufs);
		spin_unlock_irqrestore(&acm->read_lock, flags);
	} else {
		tty_kref_put(tty);
		dbg("Partial buffer fill %d", copied);
		if (copied > 0) {
			memmove(buf->base,
				buf->base + copied,
				buf->size - copied);
			buf->size -= copied;
		}
		spin_lock_irqsave(&acm->read_lock, flags);
		list_add(&buf->list, &acm->filled_read_bufs);
		spin_unlock_irqrestore(&acm->read_lock, flags);
		return;
	}
	goto next_buffer;

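	/* Pair the spare buffers with spare URBs and resubmit them so more
	 * data can be received. */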
urbs:
	tty_kref_put(tty);

	while (!list_empty(&acm->spare_read_bufs)) {
		spin_lock_irqsave(&acm->read_lock, flags);
		if (list_empty(&acm->spare_read_urbs)) {
			acm->processing = 0;
			spin_unlock_irqrestore(&acm->read_lock, flags);
			return;
		}
		rcv = list_entry(acm->spare_read_urbs.next,
				 struct acm_ru, list);
		list_del(&rcv->list);
		spin_unlock_irqrestore(&acm->read_lock, flags);

		buf = list_entry(acm->spare_read_bufs.next,
				 struct acm_rb, list);
		list_del(&buf->list);

		rcv->buffer = buf;

		if (acm->is_int_ep)
			usb_fill_int_urb(rcv->urb, acm->dev,
					 acm->rx_endpoint,
					 buf->base,
					 acm->readsize,
					 acm_read_bulk, rcv, acm->bInterval);
		else
			usb_fill_bulk_urb(rcv->urb, acm->dev,
					  acm->rx_endpoint,
					  buf->base,
					  acm->readsize,
					  acm_read_bulk, rcv);
		rcv->urb->transfer_dma = buf->dma;
		rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		/* This shouldn't kill the driver as unsuccessful URBs are
		   returned to the free-urbs-pool and resubmitted ASAP */
		spin_lock_irqsave(&acm->read_lock, flags);
		if (acm->susp_count ||
				usb_submit_urb(rcv->urb, GFP_ATOMIC) < 0) {
			list_add(&buf->list, &acm->spare_read_bufs);
			list_add(&rcv->list, &acm->spare_read_urbs);
			acm->processing = 0;
			spin_unlock_irqrestore(&acm->read_lock, flags);
			return;
		} else {
			spin_unlock_irqrestore(&acm->read_lock, flags);
			dbg("acm_rx_tasklet: sending urb 0x%p, rcv 0x%p, buf 0x%p", rcv->urb, rcv, buf);
		}
	}
	spin_lock_irqsave(&acm->read_lock, flags);
	acm->processing = 0;
	spin_unlock_irqrestore(&acm->read_lock, flags);
}
Example #13
0
static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
{
	unsigned long intr_status;
	unsigned long cts_status;
	unsigned long flag = TTY_NORMAL;
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct uart_state *state = port->state;
	struct circ_buf *xmit = &port->state->xmit;
	spin_lock(&port->lock);
	intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
	wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
	intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
	if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(uint_st,
				sirfport->uart_reg->uart_type)))) {
		if (intr_status & uint_st->sirfsoc_rxd_brk) {
			port->icount.brk++;
			if (uart_handle_break(port))
				goto recv_char;
		}
		if (intr_status & uint_st->sirfsoc_rx_oflow) {
			port->icount.overrun++;
			flag = TTY_OVERRUN;
		}
		if (intr_status & uint_st->sirfsoc_frm_err) {
			port->icount.frame++;
			flag = TTY_FRAME;
		}
		if (intr_status & uint_st->sirfsoc_parity_err) {
			port->icount.parity++;
			flag = TTY_PARITY;
		}
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
		intr_status &= port->read_status_mask;
		uart_insert_char(port, intr_status,
					uint_en->sirfsoc_rx_oflow_en, 0, flag);
	}
recv_char:
	if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
			(intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
			!sirfport->tx_dma_state) {
		cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
					SIRFUART_AFC_CTS_STATUS;
		if (cts_status != 0)
			cts_status = 0;
		else
			cts_status = 1;
		uart_handle_cts_change(port, cts_status);
		wake_up_interruptible(&state->port.delta_msr_wait);
	}
	if (!sirfport->rx_dma_chan &&
		(intr_status & SIRFUART_RX_IO_INT_ST(uint_st))) {
		/*
		 * The chip raises continuous RX_TIMEOUT interrupts while the
		 * RX FIFO is empty and does not raise them if the RX FIFO
		 * receives data within the timeout window, so the original
		 * RX_TIMEOUT-only method triggered lots of useless interrupts
		 * on an empty RX FIFO. RX_DONE fires once the RX FIFO has
		 * received a byte, so use RX_DONE to wait for data to arrive,
		 * RX_THD/RX_FULL for bulk receive, and RX_TIMEOUT only for
		 * the last remaining data.
		 */
		if (intr_status & uint_st->sirfsoc_rx_done) {
			if (!sirfport->is_atlas7) {
				wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					& ~(uint_en->sirfsoc_rx_done_en));
				wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)
				| (uint_en->sirfsoc_rx_timeout_en));
			} else {
				wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
					uint_en->sirfsoc_rx_done_en);
				wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_rx_timeout_en);
			}
		} else {
			if (intr_status & uint_st->sirfsoc_rx_timeout) {
				if (!sirfport->is_atlas7) {
					wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					& ~(uint_en->sirfsoc_rx_timeout_en));
					wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					| (uint_en->sirfsoc_rx_done_en));
				} else {
					wr_regl(port,
						ureg->sirfsoc_int_en_clr_reg,
						uint_en->sirfsoc_rx_timeout_en);
					wr_regl(port, ureg->sirfsoc_int_en_reg,
						uint_en->sirfsoc_rx_done_en);
				}
			}
			sirfsoc_uart_pio_rx_chars(port, port->fifosize);
		}
	}
	spin_unlock(&port->lock);
	tty_flip_buffer_push(&state->port);
	spin_lock(&port->lock);
	if (intr_status & uint_st->sirfsoc_txfifo_empty) {
		if (sirfport->tx_dma_chan)
			sirfsoc_uart_tx_with_dma(sirfport);
		else {
			if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
				spin_unlock(&port->lock);
				return IRQ_HANDLED;
			} else {
				sirfsoc_uart_pio_tx_chars(sirfport,
						port->fifosize);
				if ((uart_circ_empty(xmit)) &&
				(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
				ufifo_st->ff_empty(port)))
					sirfsoc_uart_stop_tx(port);
			}
		}
	}
	spin_unlock(&port->lock);

	return IRQ_HANDLED;
}
Example #14
0
static enum hrtimer_restart
	sirfsoc_uart_rx_dma_hrtimer_callback(struct hrtimer *hrt)
{
	struct sirfsoc_uart_port *sirfport;
	struct uart_port *port;
	int count, inserted;
	struct dma_tx_state tx_state;
	struct tty_struct *tty;
	struct sirfsoc_register *ureg;
	struct circ_buf *xmit;
	struct sirfsoc_fifo_status *ufifo_st;
	int max_pio_cnt;

	sirfport = container_of(hrt, struct sirfsoc_uart_port, hrt);
	port = &sirfport->port;
	inserted = 0;
	tty = port->state->port.tty;
	ureg = &sirfport->uart_reg->uart_reg;
	xmit = &sirfport->rx_dma_items.xmit;
	ufifo_st = &sirfport->uart_reg->fifo_status;

	dmaengine_tx_status(sirfport->rx_dma_chan,
			sirfport->rx_dma_items.cookie, &tx_state);
	if (SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue !=
		sirfport->rx_last_pos) {
		xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
		sirfport->rx_last_pos = xmit->head;
		sirfport->pio_fetch_cnt = 0;
	}
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
			SIRFSOC_RX_DMA_BUF_SIZE);
	while (count > 0) {
		inserted = tty_insert_flip_string(tty->port,
			(const unsigned char *)&xmit->buf[xmit->tail], count);
		if (!inserted)
			goto next_hrt;
		port->icount.rx += inserted;
		xmit->tail = (xmit->tail + inserted) &
				(SIRFSOC_RX_DMA_BUF_SIZE - 1);
		count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
				SIRFSOC_RX_DMA_BUF_SIZE);
		tty_flip_buffer_push(tty->port);
	}
	/*
	 * If all data in the RX DMA buffer has been pushed into the tty
	 * buffer and only a little data (less than one DMA transfer unit)
	 * is left in the RX FIFO, fetch it in PIO mode and switch back to
	 * DMA mode immediately.
	 */
	if (!inserted && !count &&
		((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
		SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt)) {
		dmaengine_pause(sirfport->rx_dma_chan);
		/* switch to pio mode */
		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
			SIRFUART_IO_MODE);
		/*
		 * The UART controller's SWH_DMA_IO register has a
		 * CLEAR_RX_ADDR_EN bit: when a switch from I/O to DMA mode is
		 * detected, it clears the two low bits of the read pointer
		 * (USP has a similar FRADDR_CLR_EN bit in USP_RX_DMA_IO_CTRL).
		 * Data fetched from the RX FIFO into the DMA buffer in PIO
		 * mode will be overwritten by DMA after switching back,
		 * because of a hardware quirk: after the switch back to DMA
		 * mode, the RX FIFO status reads back as the number of bytes
		 * fetched in PIO mode. Record the fetched count to avoid
		 * fetching the same data again.
		 */
		max_pio_cnt = 3;
		while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
			ufifo_st->ff_empty(port)) && max_pio_cnt--) {
			xmit->buf[xmit->head] =
				rd_regl(port, ureg->sirfsoc_rx_fifo_data);
			xmit->head = (xmit->head + 1) &
					(SIRFSOC_RX_DMA_BUF_SIZE - 1);
			sirfport->pio_fetch_cnt++;
		}
		/* switch back to dma mode */
		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
			~SIRFUART_IO_MODE);
		dmaengine_resume(sirfport->rx_dma_chan);
	}
next_hrt:
	hrtimer_forward_now(hrt, ns_to_ktime(sirfport->rx_period_time));
	return HRTIMER_RESTART;
}
Example #15
0
static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport)
{
	struct tty_struct *tty = sport->port.state->port.tty;
	unsigned int status, ch, flg;

	status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
		 ISTAT_TO_SM(serial_in(sport, PNX8XXX_ISTAT));
	while (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFIFO)) {
		ch = serial_in(sport, PNX8XXX_FIFO) & 0xff;

		sport->port.icount.rx++;

		flg = TTY_NORMAL;

		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (status & (FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE |
					PNX8XXX_UART_FIFO_RXPAR |
					PNX8XXX_UART_FIFO_RXBRK) |
			      ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN))) {
			if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXBRK)) {
				status &= ~(FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
					FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR));
				sport->port.icount.brk++;
				if (uart_handle_break(&sport->port))
					goto ignore_char;
			} else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR))
				sport->port.icount.parity++;
			else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE))
				sport->port.icount.frame++;
			if (status & ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN))
				sport->port.icount.overrun++;

			status &= sport->port.read_status_mask;

			if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR))
				flg = TTY_PARITY;
			else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE))
				flg = TTY_FRAME;

#ifdef SUPPORT_SYSRQ
			sport->port.sysrq = 0;
#endif
		}

		if (uart_handle_sysrq_char(&sport->port, ch))
			goto ignore_char;

		uart_insert_char(&sport->port, status,
				ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN), ch, flg);

	ignore_char:
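		/* Acknowledge the current character so the UART presents the
		 * next one (RX_NEXT). */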
		serial_out(sport, PNX8XXX_LCR, serial_in(sport, PNX8XXX_LCR) |
				PNX8XXX_UART_LCR_RX_NEXT);
		status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
			 ISTAT_TO_SM(serial_in(sport, PNX8XXX_ISTAT));
	}
	tty_flip_buffer_push(tty);
}
Example #16
0
/*
 * read all chars in rx fifo and send them to core
 */
static void bcm_uart_do_rx(struct uart_port *port)
{
	struct tty_struct *tty;
	unsigned int max_count;

	/* limit number of char read in interrupt, should not be
	 * higher than fifo size anyway since we're much faster than
	 * serial port */
	max_count = 32;
	tty = port->info->tty;
	do {
		unsigned int iestat, c, cstat;
		char flag;

		/* get overrun/fifo empty information from ier
		 * register */
		iestat = bcm_uart_readl(port, UART_IR_REG);
		if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
			break;

		cstat = c = bcm_uart_readl(port, UART_FIFO_REG);
		port->icount.rx++;
		flag = TTY_NORMAL;
		c &= 0xff;

		if (unlikely((cstat & UART_FIFO_ANYERR_MASK))) {
			/* do stats first */
			if (cstat & UART_FIFO_BRKDET_MASK) {
				port->icount.brk++;
				if (uart_handle_break(port))
					continue;
			}

			if (cstat & UART_FIFO_PARERR_MASK)
				port->icount.parity++;
			if (cstat & UART_FIFO_FRAMEERR_MASK)
				port->icount.frame++;

			/* update flag wrt read_status_mask */
			cstat &= port->read_status_mask;
			if (cstat & UART_FIFO_BRKDET_MASK)
				flag = TTY_BREAK;
			if (cstat & UART_FIFO_FRAMEERR_MASK)
				flag = TTY_FRAME;
			if (cstat & UART_FIFO_PARERR_MASK)
				flag = TTY_PARITY;
		}

		if (uart_handle_sysrq_char(port, c))
			continue;

		if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
			port->icount.overrun++;
			tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		}

		if ((cstat & port->ignore_status_mask) == 0)
			tty_insert_flip_char(tty, c, flag);

	} while (--max_count);

	tty_flip_buffer_push(tty);
}
Example #17
0
static unsigned int sw_uart_handle_rx(struct sw_uart_port *sw_uport, unsigned int lsr)
{
	struct tty_struct *tty = sw_uport->port.state->port.tty;
	unsigned char ch = 0;
	int max_count = 256;
	char flag;

	do {
		if (likely(lsr & SW_UART_LSR_DR)) {
			ch = serial_in(&sw_uport->port, SW_UART_RBR);
#ifdef CONFIG_SW_UART_DUMP_DATA
			sw_uport->dump_buff[sw_uport->dump_len++] = ch;
#endif
		}

		flag = TTY_NORMAL;
		sw_uport->port.icount.rx++;

		if (unlikely(lsr & SW_UART_LSR_BRK_ERROR_BITS)) {
			/*
			 * For statistics only
			 */
			if (lsr & SW_UART_LSR_BI) {
				lsr &= ~(SW_UART_LSR_FE | SW_UART_LSR_PE);
				sw_uport->port.icount.brk++;
				/*
				 * We do the SysRQ and SAK checking
				 * here because otherwise the break
				 * may get masked by ignore_status_mask
				 * or read_status_mask.
				 */
				if (uart_handle_break(&sw_uport->port))
					goto ignore_char;
			} else if (lsr & SW_UART_LSR_PE)
				sw_uport->port.icount.parity++;
			else if (lsr & SW_UART_LSR_FE)
				sw_uport->port.icount.frame++;
			if (lsr & SW_UART_LSR_OE)
				sw_uport->port.icount.overrun++;

			/*
			 * Mask off conditions which should be ignored.
			 */
			lsr &= sw_uport->port.read_status_mask;
#ifdef CONFIG_SERIAL_SUNXI_CONSOLE
			if (sw_is_console_port(&sw_uport->port)) {
				/* Recover the break flag from console xmit */
				lsr |= sw_uport->lsr_break_flag;
			}
#endif
			if (lsr & SW_UART_LSR_BI)
				flag = TTY_BREAK;
			else if (lsr & SW_UART_LSR_PE)
				flag = TTY_PARITY;
			else if (lsr & SW_UART_LSR_FE)
				flag = TTY_FRAME;
		}
		if (uart_handle_sysrq_char(&sw_uport->port, ch))
			goto ignore_char;
		uart_insert_char(&sw_uport->port, lsr, SW_UART_LSR_OE, ch, flag);
ignore_char:
		lsr = serial_in(&sw_uport->port, SW_UART_LSR);
	} while ((lsr & (SW_UART_LSR_DR | SW_UART_LSR_BI)) && (max_count-- > 0));

	SERIAL_DUMP(sw_uport, "Rx");
	spin_unlock(&sw_uport->port.lock);
	tty_flip_buffer_push(tty);
	spin_lock(&sw_uport->port.lock);

	return lsr;
}
Example #18
0
static inline void receive_chars(struct uart_omap_port *up,
		unsigned int *status)
{
	struct tty_struct *tty = up->port.state->port.tty;
	unsigned int flag, lsr = *status;
	unsigned char ch = 0;
	int max_count = 256;

	do {
		if (likely(lsr & UART_LSR_DR))
			ch = serial_in(up, UART_RX);
		flag = TTY_NORMAL;
		up->port.icount.rx++;

		if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
			/*
			 * For statistics only
			 */
			if (lsr & UART_LSR_BI) {
				lsr &= ~(UART_LSR_FE | UART_LSR_PE);
				up->port.icount.brk++;
				/*
				 * We do the SysRQ and SAK checking
				 * here because otherwise the break
				 * may get masked by ignore_status_mask
				 * or read_status_mask.
				 */
				if (uart_handle_break(&up->port))
					goto ignore_char;
			} else if (lsr & UART_LSR_PE) {
				up->port.icount.parity++;
			} else if (lsr & UART_LSR_FE) {
				up->port.icount.frame++;
			}

			if (lsr & UART_LSR_OE)
				up->port.icount.overrun++;

			/*
			 * Mask off conditions which should be ignored.
			 */
			lsr &= up->port.read_status_mask;

#ifdef CONFIG_SERIAL_OMAP_CONSOLE
			if (up->port.line == up->port.cons->index) {
				/* Recover the break flag from console xmit */
				lsr |= up->lsr_break_flag;
			}
#endif
			if (lsr & UART_LSR_BI)
				flag = TTY_BREAK;
			else if (lsr & UART_LSR_PE)
				flag = TTY_PARITY;
			else if (lsr & UART_LSR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&up->port, ch))
			goto ignore_char;
		uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag);
ignore_char:
		lsr = serial_in(up, UART_LSR);
	} while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
	spin_unlock(&up->port.lock);
	tty_flip_buffer_push(tty);
	spin_lock(&up->port.lock);
}
Example #19
0
static inline void
receive_chars(struct uart_pxa_port *up, int *status, struct pt_regs *regs)
{
	struct tty_struct *tty = up->port.info->tty;
	unsigned int ch, flag;
	int max_count = 256;

	do {
		ch = serial_in(up, UART_RX);
		flag = TTY_NORMAL;
		up->port.icount.rx++;

		if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
				       UART_LSR_FE | UART_LSR_OE))) {
			/*
			 * For statistics only
			 */
			if (*status & UART_LSR_BI) {
				*status &= ~(UART_LSR_FE | UART_LSR_PE);
				up->port.icount.brk++;
				/*
				 * We do the SysRQ and SAK checking
				 * here because otherwise the break
				 * may get masked by ignore_status_mask
				 * or read_status_mask.
				 */
				if (uart_handle_break(&up->port))
					goto ignore_char;
			} else if (*status & UART_LSR_PE)
				up->port.icount.parity++;
			else if (*status & UART_LSR_FE)
				up->port.icount.frame++;
			if (*status & UART_LSR_OE)
				up->port.icount.overrun++;

			/*
			 * Mask off conditions which should be ignored.
			 */
			*status &= up->port.read_status_mask;

#ifdef CONFIG_SERIAL_PXA_CONSOLE
			if (up->port.line == up->port.cons->index) {
				/* Recover the break flag from console xmit */
				*status |= up->lsr_break_flag;
				up->lsr_break_flag = 0;
			}
#endif
			if (*status & UART_LSR_BI) {
				flag = TTY_BREAK;
			} else if (*status & UART_LSR_PE)
				flag = TTY_PARITY;
			else if (*status & UART_LSR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&up->port, ch, regs))
			goto ignore_char;

		uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);

	ignore_char:
		*status = serial_in(up, UART_LSR);
	} while ((*status & UART_LSR_DR) && (max_count-- > 0));
	tty_flip_buffer_push(tty);
}
Example #20
0
static inline void
receive_chars(struct uart_omap_port *up, unsigned int *status)
{
	struct tty_struct *tty = up->port.state->port.tty;
	unsigned int flag;
	unsigned char ch, lsr = *status;
	int max_count = 256;

	do {
		if (likely(lsr & UART_LSR_DR))
			ch = serial_in(up, UART_RX);
		flag = TTY_NORMAL;
		up->port.icount.rx++;

		if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
			/*
			 * For statistics only
			 */
			if (lsr & UART_LSR_BI) {
				lsr &= ~(UART_LSR_FE | UART_LSR_PE);
				up->port.icount.brk++;
				/*
				 * We do the SysRQ and SAK checking
				 * here because otherwise the break
				 * may get masked by ignore_status_mask
				 * or read_status_mask.
				 */
				if (uart_handle_break(&up->port))
					goto ignore_char;
			} else if (lsr & UART_LSR_PE)
				up->port.icount.parity++;
			else if (lsr & UART_LSR_FE)
				up->port.icount.frame++;
			if (lsr & UART_LSR_OE)
				up->port.icount.overrun++;

			/*
			 * Mask off conditions which should be ignored.
			 */
			lsr &= up->port.read_status_mask;

#ifdef CONFIG_SERIAL_OMAP_CONSOLE
			if (up->port.line == up->port.cons->index) {
				/* Recover the break flag from console xmit */
				lsr |= up->lsr_break_flag;
				up->lsr_break_flag = 0;
			}
#endif
			if (lsr & UART_LSR_BI)
				flag = TTY_BREAK;
			else if (lsr & UART_LSR_PE)
				flag = TTY_PARITY;
			else if (lsr & UART_LSR_FE)
				flag = TTY_FRAME;
		}

#if defined(CONFIG_KEYBOARD_P1)
		if ((up->port.line == 2) && (g_keyboard)) {
			if (ch != 0)
				send_keyevent(ch);
			goto ignore_char;
		}
#endif

		if (uart_handle_sysrq_char(&up->port, ch))
			goto ignore_char;
		uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag);
ignore_char:
		lsr = serial_in(up, UART_LSR);
	} while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));

	/* Hold the wakelock for some time to make sure the TX or RX actually
	 * starts. This has to be revisited when the actual use-case
	 * scenarios for these wake-locks are handled.
	 */
	if (up->plat_hold_wakelock)
		(up->plat_hold_wakelock(up, WAKELK_RX));
	spin_unlock(&up->port.lock);
	tty_flip_buffer_push(tty);
	spin_lock(&up->port.lock);
}
static irqreturn_t tegra_uart_isr(int irq, void *data)
{
	struct tegra_uart_port *t = data;
	struct uart_port *u = &t->uport;
	unsigned char iir;
	unsigned char ier;
	bool is_rx_int = false;
	unsigned long flags;

	spin_lock_irqsave(&u->lock, flags);
	t  = container_of(u, struct tegra_uart_port, uport);
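	/* Service interrupts until the IIR reports none pending */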
	while (1) {
		iir = uart_readb(t, UART_IIR);
		if (iir & UART_IIR_NO_INT) {
			if (likely(t->use_rx_dma) && is_rx_int) {
				do_handle_rx_dma(t);

				if (t->rx_in_progress) {
					ier = t->ier_shadow;
					ier |= (UART_IER_RLSI | UART_IER_RTOIE | UART_IER_EORD);
					t->ier_shadow = ier;
					uart_writeb(t, ier, UART_IER);
				}
			}
			spin_unlock_irqrestore(&u->lock, flags);
			return IRQ_HANDLED;
		}

		dev_dbg(u->dev, "tegra_uart_isr iir = 0x%x (%d)\n", iir,
			(iir >> 1) & 0x7);
		switch ((iir >> 1) & 0x7) {
		case 0: /* Modem signal change interrupt */
			do_handle_modem_signal(u);
			break;
		case 1: /* Transmit interrupt only triggered when using PIO */
			t->ier_shadow &= ~UART_IER_THRI;
			uart_writeb(t, t->ier_shadow, UART_IER);
			do_handle_tx_pio(t);
			break;
		case 4: /* End of data */
		case 6: /* Rx timeout */
		case 2: /* Receive */
			if (likely(t->use_rx_dma)) {
				if (!is_rx_int) {
					is_rx_int = true;
					/* Disable interrupts */
					ier = t->ier_shadow;
					ier |= UART_IER_RDI;
					uart_writeb(t, ier, UART_IER);
					ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE | UART_IER_EORD);
					t->ier_shadow = ier;
					uart_writeb(t, ier, UART_IER);
				}
			} else {
				do_handle_rx_pio(t);

				spin_unlock_irqrestore(&u->lock, flags);
				tty_flip_buffer_push(u->state->port.tty);
				spin_lock_irqsave(&u->lock, flags);
			}
			break;
		case 3: /* Receive error */
			/* FIXME how to handle this? Why do we get here */
			do_decode_rx_error(t, uart_readb(t, UART_LSR));
			break;
		case 5: /* break nothing to handle */
		case 7: /* break nothing to handle */
			break;
		}
	}
}
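
tegra_uart_isr above services one event per pass of its while (1) loop, keyed off bits [3:1] of the 16550-style IIR register; bit 0 set means nothing is pending and the handler returns. Here is a standalone sketch of that decode; the table entries simply mirror the switch cases above, and UART_IIR_NO_INT (0x01) is the standard serial_reg.h value.

#include <stdio.h>

#define UART_IIR_NO_INT 0x01	/* bit 0 set: no interrupt pending */

/* Labels for (iir >> 1) & 0x7, matching the switch in tegra_uart_isr(). */
static const char *const iir_source[8] = {
	"modem signal change",	/* 0 */
	"transmit (PIO)",	/* 1 */
	"receive",		/* 2 */
	"receive error",	/* 3 */
	"end of data",		/* 4 */
	"break (nothing to do)",/* 5 */
	"rx timeout",		/* 6 */
	"break (nothing to do)",/* 7 */
};

static void decode_iir(unsigned char iir)
{
	if (iir & UART_IIR_NO_INT) {
		printf("iir=0x%02x: no interrupt pending\n", iir);
		return;
	}
	printf("iir=0x%02x: %s\n", iir, iir_source[(iir >> 1) & 0x7]);
}

int main(void)
{
	unsigned char samples[] = { 0x01, 0x04, 0x0c, 0x06 };

	for (unsigned int i = 0; i < sizeof(samples); i++)
		decode_iir(samples[i]);
	return 0;
}
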
Example #22
static void acm_rx_tasklet(unsigned long _acm)
{
	struct acm *acm = (void *)_acm;
	struct acm_rb *buf;
	struct tty_struct *tty = acm->tty;
	struct acm_ru *rcv;
	unsigned long flags;
	unsigned char throttled;

	dbg("Entering acm_rx_tasklet");

	if (!ACM_READY(acm)) {
		dbg("acm_rx_tasklet: ACM not ready");
		return;
	}

	spin_lock_irqsave(&acm->throttle_lock, flags);
	throttled = acm->throttle;
	spin_unlock_irqrestore(&acm->throttle_lock, flags);
	if (throttled) {
		dbg("acm_rx_tasklet: throttled");
		return;
	}

next_buffer:
	spin_lock_irqsave(&acm->read_lock, flags);
	if (list_empty(&acm->filled_read_bufs)) {
		spin_unlock_irqrestore(&acm->read_lock, flags);
		goto urbs;
	}
	buf = list_entry(acm->filled_read_bufs.next,
			 struct acm_rb, list);
	list_del(&buf->list);
	spin_unlock_irqrestore(&acm->read_lock, flags);

	dbg("acm_rx_tasklet: procesing buf 0x%p, size = %d", buf, buf->size);

	tty_buffer_request_room(tty, buf->size);
	spin_lock_irqsave(&acm->throttle_lock, flags);
	throttled = acm->throttle;
	spin_unlock_irqrestore(&acm->throttle_lock, flags);
	if (!throttled)
		tty_insert_flip_string(tty, buf->base, buf->size);
	tty_flip_buffer_push(tty);

	if (throttled) {
		dbg("Throttling noticed");
		spin_lock_irqsave(&acm->read_lock, flags);
		list_add(&buf->list, &acm->filled_read_bufs);
		spin_unlock_irqrestore(&acm->read_lock, flags);
		return;
	}

	spin_lock_irqsave(&acm->read_lock, flags);
	list_add(&buf->list, &acm->spare_read_bufs);
	spin_unlock_irqrestore(&acm->read_lock, flags);
	goto next_buffer;

urbs:
	while (!list_empty(&acm->spare_read_bufs)) {
		spin_lock_irqsave(&acm->read_lock, flags);
		if (list_empty(&acm->spare_read_urbs)) {
			acm->processing = 0;
			spin_unlock_irqrestore(&acm->read_lock, flags);
			return;
		}
		rcv = list_entry(acm->spare_read_urbs.next,
				 struct acm_ru, list);
		list_del(&rcv->list);
		spin_unlock_irqrestore(&acm->read_lock, flags);

		buf = list_entry(acm->spare_read_bufs.next,
				 struct acm_rb, list);
		list_del(&buf->list);

		rcv->buffer = buf;

		usb_fill_bulk_urb(rcv->urb, acm->dev,
				  acm->rx_endpoint,
				  buf->base,
				  acm->readsize,
				  acm_read_bulk, rcv);
		rcv->urb->transfer_dma = buf->dma;
		rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		/* This shouldn't kill the driver as unsuccessful URBs are
		 * returned to the free-urbs-pool and resubmitted ASAP */
		spin_lock_irqsave(&acm->read_lock, flags);
		if (acm->susp_count || usb_submit_urb(rcv->urb, GFP_ATOMIC) < 0) {
			list_add(&buf->list, &acm->spare_read_bufs);
			list_add(&rcv->list, &acm->spare_read_urbs);
			acm->processing = 0;
			spin_unlock_irqrestore(&acm->read_lock, flags);
			return;
		} else {
			spin_unlock_irqrestore(&acm->read_lock, flags);
			dbg("acm_rx_tasklet: sending urb 0x%p, rcv 0x%p, buf 0x%p", rcv->urb, rcv, buf);
		}
	}
	spin_lock_irqsave(&acm->read_lock, flags);
	acm->processing = 0;
	spin_unlock_irqrestore(&acm->read_lock, flags);
}
Example #23
void jsm_input(struct jsm_channel *ch)
{
	struct jsm_board *bd;
	struct tty_struct *tp;
	u32 rmask;
	u16 head;
	u16 tail;
	int data_len;
	unsigned long lock_flags;
	int len = 0;
	int n = 0;
	int s = 0;
	int i = 0;

	if (!ch)
		return;

	jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start\n");

	tp = ch->uart_port.state->port.tty;

	bd = ch->ch_bd;
	if (!bd)
		return;

	spin_lock_irqsave(&ch->ch_lock, lock_flags);

	/*
	 * Figure the number of characters in the buffer.
	 * Exit immediately if none.
	 */

	rmask = RQUEUEMASK;

	head = ch->ch_r_head & rmask;
	tail = ch->ch_r_tail & rmask;

	data_len = (head - tail) & rmask;
	if (data_len == 0) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start\n");

	/*
	 * If the device is not open, or CREAD is off, flush
	 * input data and return immediately.
	 */
	if (!tp || !(tp->termios->c_cflag & CREAD)) {

		jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
			"input. dropping %d bytes on port %d...\n", data_len, ch->ch_portnum);
		ch->ch_r_head = tail;

		/* Force queue flow control to be released, if needed */
		jsm_check_queue_flow_control(ch);

		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	/*
	 * If we are throttled, simply don't read any data.
	 */
	if (ch->ch_flags & CH_STOPI) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
			"Port %d throttled, not reading any data. head: %x tail: %x\n",
			ch->ch_portnum, head, tail);
		return;
	}

	jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start 2\n");

	if (data_len <= 0) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "jsm_input 1\n");
		return;
	}

	len = tty_buffer_request_room(tp, data_len);
	n = len;

	/*
	 * n now contains the maximum amount of data we can copy,
	 * bounded either by the flip buffer size or the amount
	 * of data the card actually has pending...
	 */
	while (n) {
		s = ((head >= tail) ? head : RQUEUESIZE) - tail;
		s = min(s, n);

		if (s <= 0)
			break;

		/*
		 * If conditions are such that ld needs to see all
		 * UART errors, we will have to walk each character
		 * and error byte and send them to the buffer one at
		 * a time.
		 */

		if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
			for (i = 0; i < s; i++) {
				/*
				 * Give the Linux ld the flags in the
				 * format it likes.
				 */
				if (*(ch->ch_equeue +tail +i) & UART_LSR_BI)
					tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i),  TTY_BREAK);
				else if (*(ch->ch_equeue +tail +i) & UART_LSR_PE)
					tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_PARITY);
				else if (*(ch->ch_equeue +tail +i) & UART_LSR_FE)
					tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_FRAME);
				else
					tty_insert_flip_char(tp, *(ch->ch_rqueue +tail +i), TTY_NORMAL);
			}
		} else {
			tty_insert_flip_string(tp, ch->ch_rqueue + tail, s);
		}
		tail += s;
		n -= s;
		/* Flip queue if needed */
		tail &= rmask;
	}

	ch->ch_r_tail = tail & rmask;
	ch->ch_e_tail = tail & rmask;
	jsm_check_queue_flow_control(ch);
	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

	/* Tell the tty layer it's okay to "eat" the data now */
	tty_flip_buffer_push(tp);

	jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "finish\n");
}
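
jsm_input above sizes its copy with power-of-two ring arithmetic: the number of pending bytes is (head - tail) & mask, and each pass of the while (n) loop copies the contiguous run that ends either at head or at the wrap point. The following is a small standalone sketch of that arithmetic; RQUEUESIZE here is an arbitrary demo value, not the driver's real queue size.

#include <stdio.h>

/* Hypothetical ring size; must be a power of two, mask = size - 1. */
#define RQUEUESIZE	16
#define RQUEUEMASK	(RQUEUESIZE - 1)

/* Bytes currently pending in the ring. */
static unsigned int ring_len(unsigned int head, unsigned int tail)
{
	return (head - tail) & RQUEUEMASK;
}

/* Length of the contiguous run starting at tail (stops at head or at the wrap). */
static unsigned int ring_chunk(unsigned int head, unsigned int tail)
{
	return ((head >= tail) ? head : RQUEUESIZE) - tail;
}

int main(void)
{
	unsigned int head = 3, tail = 12;	/* wrapped: data spans 12..15 and 0..2 */

	printf("pending = %u\n", ring_len(head, tail));		/* 7 */
	printf("first chunk = %u\n", ring_chunk(head, tail));	/* 4, i.e. 12..15 */
	tail = (tail + ring_chunk(head, tail)) & RQUEUEMASK;	/* wrap the tail */
	printf("second chunk = %u\n", ring_chunk(head, tail));	/* 3, i.e. 0..2 */
	return 0;
}
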
Example #24
static void receive_chars(struct uart_sio_port *up, int *status)
{
	struct tty_port *port = &up->port.state->port;
	unsigned char ch;
	unsigned char flag;
	int max_count = 256;

	do {
		ch = sio_in(up, SIORXB);
		flag = TTY_NORMAL;
		up->port.icount.rx++;

		if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
				       UART_LSR_FE | UART_LSR_OE))) {
			/*
			 * For statistics only
			 */
			if (*status & UART_LSR_BI) {
				*status &= ~(UART_LSR_FE | UART_LSR_PE);
				up->port.icount.brk++;
				/*
				 * We do the SysRQ and SAK checking
				 * here because otherwise the break
				 * may get masked by ignore_status_mask
				 * or read_status_mask.
				 */
				if (uart_handle_break(&up->port))
					goto ignore_char;
			} else if (*status & UART_LSR_PE)
				up->port.icount.parity++;
			else if (*status & UART_LSR_FE)
				up->port.icount.frame++;
			if (*status & UART_LSR_OE)
				up->port.icount.overrun++;

			/*
			 * Mask off conditions which should be ignored.
			 */
			*status &= up->port.read_status_mask;

			if (up->port.line == up->port.cons->index) {
				/* Recover the break flag from console xmit */
				*status |= up->lsr_break_flag;
				up->lsr_break_flag = 0;
			}

			if (*status & UART_LSR_BI) {
				DEBUG_INTR("handling break....");
				flag = TTY_BREAK;
			} else if (*status & UART_LSR_PE)
				flag = TTY_PARITY;
			else if (*status & UART_LSR_FE)
				flag = TTY_FRAME;
		}
		if (uart_handle_sysrq_char(&up->port, ch))
			goto ignore_char;
		if ((*status & up->port.ignore_status_mask) == 0)
			tty_insert_flip_char(port, ch, flag);

		if (*status & UART_LSR_OE) {
			/*
			 * Overrun is special, since it's reported
			 * immediately, and doesn't affect the current
			 * character.
			 */
			tty_insert_flip_char(port, 0, TTY_OVERRUN);
		}
	ignore_char:
		*status = serial_in(up, UART_LSR);
	} while ((*status & UART_LSR_DR) && (max_count-- > 0));

	spin_unlock(&up->port.lock);
	tty_flip_buffer_push(port);
	spin_lock(&up->port.lock);
}
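
The loop above treats overrun differently from the other line errors: the received character keeps its own break/parity/frame flag, and when UART_LSR_OE is set an additional NUL byte tagged TTY_OVERRUN is queued after it, because the overrun reports data that was lost rather than describing the current character. Below is a toy sketch of that insertion order; the 16-entry array merely stands in for the flip buffer and the LSR values are the standard serial_reg.h bits.

#include <stdio.h>

#define UART_LSR_OE 0x02	/* overrun error */
#define UART_LSR_PE 0x04	/* parity error */

enum flag { FLAG_NORMAL, FLAG_PARITY, FLAG_OVERRUN };

struct entry { unsigned char ch; enum flag flag; };

static struct entry flip[16];	/* stand-in for the tty flip buffer */
static unsigned int count;

static void insert(unsigned char ch, enum flag flag)
{
	if (count < 16)
		flip[count++] = (struct entry){ ch, flag };
}

/* Mirror of the tail of the receive loop: current char first, then the overrun marker. */
static void receive_one(unsigned char ch, unsigned char lsr)
{
	insert(ch, (lsr & UART_LSR_PE) ? FLAG_PARITY : FLAG_NORMAL);
	if (lsr & UART_LSR_OE)
		insert(0, FLAG_OVERRUN);	/* extra NUL describing the lost data */
}

int main(void)
{
	static const char *const names[] = { "NORMAL", "PARITY", "OVERRUN" };

	receive_one('a', 0x01);
	receive_one('b', 0x01 | UART_LSR_OE);	/* overrun noticed with this char */

	for (unsigned int i = 0; i < count; i++)
		printf("ch=0x%02x flag=TTY_%s\n", flip[i].ch, names[flip[i].flag]);
	return 0;
}
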
Example #25
static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
{
	unsigned long intr_status;
	unsigned long cts_status;
	unsigned long flag = TTY_NORMAL;
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct uart_state *state = port->state;
	struct circ_buf *xmit = &port->state->xmit;
	spin_lock(&port->lock);
	intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
	wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
	intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
	if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(port, uint_st)))) {
		if (intr_status & uint_st->sirfsoc_rxd_brk) {
			port->icount.brk++;
			if (uart_handle_break(port))
				goto recv_char;
		}
		if (intr_status & uint_st->sirfsoc_rx_oflow)
			port->icount.overrun++;
		if (intr_status & uint_st->sirfsoc_frm_err) {
			port->icount.frame++;
			flag = TTY_FRAME;
		}
		if (intr_status & uint_st->sirfsoc_parity_err)
			flag = TTY_PARITY;
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
		intr_status &= port->read_status_mask;
		uart_insert_char(port, intr_status,
					uint_en->sirfsoc_rx_oflow_en, 0, flag);
	}
recv_char:
	if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
			(intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
			!sirfport->tx_dma_state) {
		cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
					SIRFUART_AFC_CTS_STATUS;
		if (cts_status != 0)
			cts_status = 0;
		else
			cts_status = 1;
		uart_handle_cts_change(port, cts_status);
		wake_up_interruptible(&state->port.delta_msr_wait);
	}
	if (sirfport->rx_dma_chan) {
		if (intr_status & uint_st->sirfsoc_rx_timeout)
			sirfsoc_uart_handle_rx_tmo(sirfport);
		if (intr_status & uint_st->sirfsoc_rx_done)
			sirfsoc_uart_handle_rx_done(sirfport);
	} else {
		if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st))
			sirfsoc_uart_pio_rx_chars(port,
					SIRFSOC_UART_IO_RX_MAX_CNT);
	}
	spin_unlock(&port->lock);
	tty_flip_buffer_push(&state->port);
	spin_lock(&port->lock);
	if (intr_status & uint_st->sirfsoc_txfifo_empty) {
		if (sirfport->tx_dma_chan)
			sirfsoc_uart_tx_with_dma(sirfport);
		else {
			if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
				spin_unlock(&port->lock);
				return IRQ_HANDLED;
			} else {
				sirfsoc_uart_pio_tx_chars(sirfport,
					SIRFSOC_UART_IO_TX_REASONABLE_CNT);
				if ((uart_circ_empty(xmit)) &&
				(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
				ufifo_st->ff_empty(port->line)))
					sirfsoc_uart_stop_tx(port);
			}
		}
	}
	spin_unlock(&port->lock);

	return IRQ_HANDLED;
}
Example #26
/*
 * Characters received (called from interrupt handler)
 */
static void atmel_rx_chars(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port;
	struct tty_struct *tty = port->info->tty;
	unsigned int status, ch, flg;

	status = UART_GET_CSR(port);
	while (status & ATMEL_US_RXRDY) {
		ch = UART_GET_CHAR(port);

		port->icount.rx++;

		flg = TTY_NORMAL;

		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
			     || atmel_port->break_active)) {
			UART_PUT_CR(port, ATMEL_US_RSTSTA);	/* clear error */
			if (status & ATMEL_US_RXBRK
			    && !atmel_port->break_active) {
				status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);	/* ignore side-effect */
				port->icount.brk++;
				atmel_port->break_active = 1;
				UART_PUT_IER(port, ATMEL_US_RXBRK);
				if (uart_handle_break(port))
					goto ignore_char;
			} else {
				/*
				 * This is either the end-of-break
				 * condition or we've received at
				 * least one character without RXBRK
				 * being set. In both cases, the next
				 * RXBRK will indicate start-of-break.
				 */
				UART_PUT_IDR(port, ATMEL_US_RXBRK);
				status &= ~ATMEL_US_RXBRK;
				atmel_port->break_active = 0;
			}
			if (status & ATMEL_US_PARE)
				port->icount.parity++;
			if (status & ATMEL_US_FRAME)
				port->icount.frame++;
			if (status & ATMEL_US_OVRE)
				port->icount.overrun++;

			status &= port->read_status_mask;

			if (status & ATMEL_US_RXBRK)
				flg = TTY_BREAK;
			else if (status & ATMEL_US_PARE)
				flg = TTY_PARITY;
			else if (status & ATMEL_US_FRAME)
				flg = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(port, ch))
			goto ignore_char;

		uart_insert_char(port, status, ATMEL_US_OVRE, ch, flg);

	ignore_char:
		status = UART_GET_CSR(port);
	}

	tty_flip_buffer_push(tty);
}
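
atmel_rx_chars pairs the RXBRK status bit with the driver's break_active flag so that one long break is counted exactly once: the first RXBRK marks start-of-break (and re-arms the RXBRK interrupt for its end), while the next RXBRK, or any character received without it, marks end-of-break. Here is a reduced, standalone sketch of that two-state logic; US_RXBRK is an arbitrary stand-in value and the interrupt enable/disable register writes are left out.

#include <stdio.h>
#include <stdbool.h>

#define US_RXBRK 0x04	/* stand-in for ATMEL_US_RXBRK; the real value lives in the driver */

static bool break_active;
static unsigned int brk_count;

static void handle_status(unsigned int status)
{
	if ((status & US_RXBRK) && !break_active) {
		/* start of break: count it once, then wait for the end-of-break RXBRK */
		brk_count++;
		break_active = true;
		printf("start of break (total %u)\n", brk_count);
	} else if (break_active) {
		/* end of break, or a character arrived without RXBRK being set */
		break_active = false;
		printf("end of break\n");
	} else {
		printf("normal character\n");
	}
}

int main(void)
{
	unsigned int events[] = { 0, US_RXBRK, US_RXBRK, 0, US_RXBRK, 0 };

	for (unsigned int i = 0; i < sizeof(events) / sizeof(events[0]); i++)
		handle_status(events[i]);
	return 0;
}
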
/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			port->nbytes_to_tty += count;
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; without low_latency set this is handled by
	 * a workqueue, so we won't get callbacks and can hold port_lock
	 */
	if (tty && do_push) {
		spin_unlock_irq(&port->port_lock);
		tty_flip_buffer_push(tty);
		wake_up_interruptible(&tty->read_wait);
		spin_lock_irq(&port->port_lock);

		/* tty may have been closed */
		tty = port->port_tty;
	}

	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the work queue
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
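
gs_rx_push above only recycles a USB request once the TTY layer has taken every byte of it; when tty_insert_flip_string() accepts fewer bytes than offered, the shortfall is remembered in port->n_read and the same request is retried from that offset on a later run. Below is a self-contained userspace sketch of that bookkeeping; the fake tty that accepts at most four bytes per call is purely an assumption for the demo.

#include <stdio.h>
#include <string.h>

/* Stand-in for tty_insert_flip_string(): accepts at most 4 bytes per call. */
static unsigned int fake_tty_insert(const char *buf, unsigned int len)
{
	unsigned int accepted = len < 4 ? len : 4;

	printf("tty accepted %u byte(s): \"%.*s\"\n", accepted, (int)accepted, buf);
	return accepted;
}

int main(void)
{
	const char packet[] = "hello world";	/* one completed "usb_request" */
	unsigned int size = (unsigned int)strlen(packet);
	unsigned int n_read = 0;		/* mirrors port->n_read */

	/*
	 * Each iteration models one run of the push handler.  In the driver a
	 * partial insert ends the run and the request stays on the queue; the
	 * next run resumes from the remembered offset.
	 */
	while (n_read < size) {
		unsigned int n = fake_tty_insert(packet + n_read, size - n_read);

		n_read += n;
		if (n_read < size)
			printf("partial push, will resume at offset %u\n", n_read);
	}

	/* Whole packet consumed: the request is recycled and n_read reset. */
	n_read = 0;
	printf("packet fully pushed\n");
	return 0;
}
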
Example #28
static irqreturn_t
s3c24xx_serial_rx_chars(int irq, void *dev_id)
{
	struct s3c24xx_uart_port *ourport = dev_id;
	struct uart_port *port = &ourport->port;
	struct tty_struct *tty = port->state->port.tty;
	unsigned int ufcon, ch, flag, ufstat, uerstat;
	int max_count = 64;

	while (max_count-- > 0) {
		ufcon = rd_regl(port, S3C2410_UFCON);
		ufstat = rd_regl(port, S3C2410_UFSTAT);

		if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0)
			break;

		uerstat = rd_regl(port, S3C2410_UERSTAT);
		ch = rd_regb(port, S3C2410_URXH);

		if (port->flags & UPF_CONS_FLOW) {
			int txe = s3c24xx_serial_txempty_nofifo(port);

			if (rx_enabled(port)) {
				if (!txe) {
					rx_enabled(port) = 0;
					continue;
				}
			} else {
				if (txe) {
					ufcon |= S3C2410_UFCON_RESETRX;
					wr_regl(port, S3C2410_UFCON, ufcon);
					rx_enabled(port) = 1;
					goto out;
				}
				continue;
			}
		}

		/* insert the character into the buffer */

		flag = TTY_NORMAL;
		port->icount.rx++;

		if (unlikely(uerstat & S3C2410_UERSTAT_ANY)) {
			dbg("rxerr: port ch=0x%02x, rxs=0x%08x\n",
			    ch, uerstat);

			/* check for break */
			if (uerstat & S3C2410_UERSTAT_BREAK) {
				dbg("break!\n");
				port->icount.brk++;
				if (uart_handle_break(port))
				    goto ignore_char;
			}

			if (uerstat & S3C2410_UERSTAT_FRAME)
				port->icount.frame++;
			if (uerstat & S3C2410_UERSTAT_OVERRUN)
				port->icount.overrun++;

			uerstat &= port->read_status_mask;

			if (uerstat & S3C2410_UERSTAT_BREAK)
				flag = TTY_BREAK;
			else if (uerstat & S3C2410_UERSTAT_PARITY)
				flag = TTY_PARITY;
			else if (uerstat & (S3C2410_UERSTAT_FRAME |
					    S3C2410_UERSTAT_OVERRUN))
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(port, ch))
			goto ignore_char;

		uart_insert_char(port, uerstat, S3C2410_UERSTAT_OVERRUN,
				 ch, flag);

 ignore_char:
		continue;
	}
	tty_flip_buffer_push(tty);

 out:
	return IRQ_HANDLED;
}
Example #29
/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(unsigned long _port)
{
	struct gs_port		*port = (void *)_port;
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* leave data queued if tty was rx throttled */
		if (tty && tty_throttled(tty))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug("ttyGS%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warn("ttyGS%d: unexpected RX status %d\n",
				port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual && tty) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(&port->port, packet,
					size);
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug("ttyGS%d: rx block %d/%d\n",
					  port->port_num, count, req->actual);
				break;
			}
			port->n_read = 0;
		}

		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; this is handled by a workqueue,
	 * so we won't get callbacks and can hold port_lock
	 */
	if (do_push)
		tty_flip_buffer_push(&port->port);


	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the tasklet
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!tty_throttled(tty)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warn("ttyGS%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
static void
#ifdef SUPPORT_SYSRQ
ambauart_rx_chars(struct uart_port *port, unsigned short status, struct pt_regs *regs)
#else
ambauart_rx_chars(struct uart_port *port, unsigned short status)
#endif
{
	struct tty_struct *tty = port->info->tty;
	unsigned short ch, lsr, max_count = 256;
	
	while (UART_RX_DATA(status) && max_count--) {
		lsr = status;
		if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
			tty->flip.tqueue.routine((void *)tty);
			if (tty->flip.count >= TTY_FLIPBUF_SIZE) {
				printk(KERN_WARNING "TTY_DONT_FLIP set\n");
				return;
			}
		}

		ch = UART_GET_CHAR(port);

		*tty->flip.char_buf_ptr = ch;
		*tty->flip.flag_buf_ptr = TTY_NORMAL;
		port->icount.rx++;

		/*
		 * Note that the error handling code is
		 * out of the main execution path
		 */
		lsr |= UART_DUMMY_LSR_RX;

		if (lsr & KS8695_UART_LINES_ANY) {
			if (lsr & KS8695_UART_LINES_BE) {
				lsr &= ~(KS8695_UART_LINES_FE | KS8695_UART_LINES_PE);
				port->icount.brk++;
				if (uart_handle_break(port))
					goto ignore_char;
			} else if (lsr & KS8695_UART_LINES_PE)
				port->icount.parity++;
			else if (lsr & KS8695_UART_LINES_FE)
				port->icount.frame++;
			if (lsr & KS8695_UART_LINES_OE)
				port->icount.overrun++;

			lsr &= port->read_status_mask;

			if (lsr & KS8695_UART_LINES_BE)
				*tty->flip.flag_buf_ptr = TTY_BREAK;
			else if (lsr & KS8695_UART_LINES_PE)
				*tty->flip.flag_buf_ptr = TTY_PARITY;
			else if (lsr & KS8695_UART_LINES_FE)
				*tty->flip.flag_buf_ptr = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(port, ch, regs))
			goto ignore_char;

		if ((lsr & port->ignore_status_mask) == 0) {
			tty->flip.flag_buf_ptr++;
			tty->flip.char_buf_ptr++;
			tty->flip.count++;
		}
		if ((lsr & KS8695_UART_LINES_OE) &&
		    tty->flip.count < TTY_FLIPBUF_SIZE) {
			/*
			 * Overrun is special, since it's reported
			 * immediately, and doesn't affect the current
			 * character
			 */
			*tty->flip.char_buf_ptr++ = 0;
			*tty->flip.flag_buf_ptr++ = TTY_OVERRUN;
			tty->flip.count++;
		}
	ignore_char:
		status = UART_GET_LSR(port);
	}
	tty_flip_buffer_push(tty);
	return;
}