static void neo_set_no_output_flow_control(struct jsm_channel *ch)
{
	u8 ier, efr;
	ier = readb(&ch->ch_neo_uart->ier);
	efr = readb(&ch->ch_neo_uart->efr);

	jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Unsetting Output FLOW\n");

	/* Turn off auto CTS flow control */
	ier &= ~(UART_17158_IER_CTSDSR);
	efr &= ~(UART_17158_EFR_CTSDSR);

	/* Turn off auto Xon flow control */
	if (ch->ch_c_iflag & IXOFF)
		efr &= ~(UART_17158_EFR_IXON);
	else
		efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON);

	/* Why? Because Exar's spec says we have to zero it out before setting it */
	writeb(0, &ch->ch_neo_uart->efr);

	/* Turn on UART enhanced bits */
	writeb(efr, &ch->ch_neo_uart->efr);

	/* Turn on table D, with 8 char hi/low watermarks */
	writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);

	ch->ch_r_watermark = 0;

	writeb(16, &ch->ch_neo_uart->tfifo);
	ch->ch_t_tlevel = 16;

	writeb(16, &ch->ch_neo_uart->rfifo);
	ch->ch_r_tlevel = 16;

	writeb(ier, &ch->ch_neo_uart->ier);
}
static void neo_set_ixoff_flow_control(struct jsm_channel *ch)
{
	u8 ier, efr;
	ier = readb(&ch->ch_neo_uart->ier);
	efr = readb(&ch->ch_neo_uart->efr);

	jsm_printk(PARAM, INFO, &ch->ch_bd->pci_dev, "Setting IXOFF FLOW\n");

	/* Turn off auto RTS flow control */
	ier &= ~(UART_17158_IER_RTSDTR);
	efr &= ~(UART_17158_EFR_RTSDTR);

	/* Turn on auto Xoff flow control */
	ier |= (UART_17158_IER_XOFF);
	efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);

	/* Why? Because Exar's spec says we have to zero it out before setting it */
	writeb(0, &ch->ch_neo_uart->efr);

	/* Turn on UART enhanced bits */
	writeb(efr, &ch->ch_neo_uart->efr);

	/* Turn on table D, with 8 char hi/low watermarks */
	writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);

	writeb(8, &ch->ch_neo_uart->tfifo);
	ch->ch_t_tlevel = 8;

	/* Tell UART what start/stop chars it should be looking for */
	writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
	writeb(0, &ch->ch_neo_uart->xonchar2);

	writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
	writeb(0, &ch->ch_neo_uart->xoffchar2);

	writeb(ier, &ch->ch_neo_uart->ier);
}
static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int rc = 0;
	struct jsm_board *brd;
	static int adapter_count = 0;

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Device enable FAILED\n");
		goto out;
	}

	rc = pci_request_regions(pdev, "jsm");
	if (rc) {
		dev_err(&pdev->dev, "pci_request_region FAILED\n");
		goto out_disable_device;
	}

	brd = kzalloc(sizeof(struct jsm_board), GFP_KERNEL);
	if (!brd) {
		dev_err(&pdev->dev, "memory allocation for board structure failed\n");
		rc = -ENOMEM;
		goto out_release_regions;
	}

	/* store the info for the board we've found */
	brd->boardnum = adapter_count++;
	brd->pci_dev = pdev;
	if (pdev->device == PCIE_DEVICE_ID_NEO_4_IBM)
		brd->maxports = 4;
	else if (pdev->device == PCI_DEVICE_ID_DIGI_NEO_8)
		brd->maxports = 8;
	else
		brd->maxports = 2;

	spin_lock_init(&brd->bd_intr_lock);

	/* store which revision we have */
	brd->rev = pdev->revision;

	brd->irq = pdev->irq;

	jsm_printk(INIT, INFO, &brd->pci_dev, "jsm_found_board - NEO adapter\n");

	/* get the PCI Base Address Registers */
	brd->membase = pci_resource_start(pdev, 0);
	brd->membase_end = pci_resource_end(pdev, 0);

	if (brd->membase & 1)
		brd->membase &= ~3;
	else
		brd->membase &= ~15;

	/* Assign the board_ops struct */
	brd->bd_ops = &jsm_neo_ops;

	brd->bd_uart_offset = 0x200;
	brd->bd_dividend = 921600;

	brd->re_map_membase = ioremap(brd->membase, pci_resource_len(pdev, 0));
	if (!brd->re_map_membase) {
		dev_err(&pdev->dev,
			"card has no PCI Memory resources, failing board.\n");
		rc = -ENOMEM;
		goto out_kfree_brd;
	}

	rc = request_irq(brd->irq, brd->bd_ops->intr, IRQF_SHARED, "JSM", brd);
	if (rc) {
		printk(KERN_WARNING "Failed to hook IRQ %d\n", brd->irq);
		goto out_iounmap;
	}

	rc = jsm_tty_init(brd);
	if (rc < 0) {
		dev_err(&pdev->dev, "Can't init tty devices (%d)\n", rc);
		rc = -ENXIO;
		goto out_free_irq;
	}

	rc = jsm_uart_port_init(brd);
	if (rc < 0) {
		/* XXX: leaking all resources from jsm_tty_init here! */
		dev_err(&pdev->dev, "Can't init uart port (%d)\n", rc);
		rc = -ENXIO;
		goto out_free_irq;
	}

	/* Log the information about the board */
	dev_info(&pdev->dev, "board %d: Digi Neo (rev %d), irq %d\n",
		 adapter_count, brd->rev, brd->irq);

	pci_set_drvdata(pdev, brd);
	pci_save_state(pdev);

	return 0;

out_free_irq:
	jsm_remove_uart_port(brd);
	free_irq(brd->irq, brd);
out_iounmap:
	iounmap(brd->re_map_membase);
out_kfree_brd:
	kfree(brd);
out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return rc;
}
void jsm_check_queue_flow_control(struct jsm_channel *ch)
{
	struct board_ops *bd_ops = ch->ch_bd->bd_ops;
	int qleft;

	/* Store how much space we have left in the queue */
	if ((qleft = ch->ch_r_tail - ch->ch_r_head - 1) < 0)
		qleft += RQUEUEMASK + 1;

	/*
	 * Check to see if we should enforce flow control on our queue because
	 * the ld (or user) isn't reading data out of our queue fast enough.
	 *
	 * NOTE: This is done based on what the current flow control of the
	 * port is set for.
	 *
	 * 1) HWFLOW (RTS) - Turn off the UART's Receive interrupt.
	 *	This will cause the UART's FIFO to back up, and force
	 *	the RTS signal to be dropped.
	 * 2) SWFLOW (IXOFF) - Keep trying to send a stop character to
	 *	the other side, in hopes it will stop sending data to us.
	 * 3) NONE - Nothing we can do. We will simply drop any extra data
	 *	that gets sent into us when the queue fills up.
	 */
	if (qleft < 256) {
		/* HWFLOW */
		if (ch->ch_c_cflag & CRTSCTS) {
			if (!(ch->ch_flags & CH_RECEIVER_OFF)) {
				bd_ops->disable_receiver(ch);
				ch->ch_flags |= (CH_RECEIVER_OFF);
				jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
					"Internal queue hit hilevel mark (%d)! Turning off interrupts.\n",
					qleft);
			}
		}
		/* SWFLOW */
		else if (ch->ch_c_iflag & IXOFF) {
			if (ch->ch_stops_sent <= MAX_STOPS_SENT) {
				bd_ops->send_stop_character(ch);
				ch->ch_stops_sent++;
				jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
					"Sending stop char! Times sent: %x\n",
					ch->ch_stops_sent);
			}
		}
	}

	/*
	 * Check to see if we should release flow control because the
	 * ld (or user) finally read enough data out of our queue.
	 *
	 * NOTE: This is done based on what the current flow control of the
	 * port is set for.
	 *
	 * 1) HWFLOW (RTS) - Turn back on the UART's Receive interrupt.
	 *	This will cause the UART's FIFO to raise RTS back up,
	 *	which will allow the other side to start sending data again.
	 * 2) SWFLOW (IXOFF) - Send a start character to
	 *	the other side, so it will start sending data to us again.
	 * 3) NONE - Do nothing. Since we didn't do anything to turn off the
	 *	other side, we don't need to do anything now.
	 */
	if (qleft > (RQUEUESIZE / 2)) {
		/* HWFLOW */
		if (ch->ch_c_cflag & CRTSCTS) {
			if (ch->ch_flags & CH_RECEIVER_OFF) {
				bd_ops->enable_receiver(ch);
				ch->ch_flags &= ~(CH_RECEIVER_OFF);
				jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
					"Internal queue hit lowlevel mark (%d)! Turning on interrupts.\n",
					qleft);
			}
		}
		/* SWFLOW */
		else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) {
			ch->ch_stops_sent = 0;
			bd_ops->send_start_character(ch);
			jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
				"Sending start char!\n");
		}
	}
}
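/*
 * Illustrative sketch (not part of the driver): the routine above applies a
 * simple hysteresis to the receive ring - enforce flow control when fewer
 * than 256 bytes of space remain, release it again once more than half the
 * queue is free. The standalone demo below reproduces that arithmetic with
 * placeholder names and an assumed power-of-two queue size.
 */
#include <assert.h>

#define DEMO_RQUEUESIZE 8192			/* assumed queue size, power of two */
#define DEMO_RQUEUEMASK (DEMO_RQUEUESIZE - 1)

/* Free space left in the ring (one slot is always kept unused). */
static int demo_queue_space(unsigned short head, unsigned short tail)
{
	int qleft = tail - head - 1;

	if (qleft < 0)
		qleft += DEMO_RQUEUEMASK + 1;
	return qleft;
}

/* -1 = enforce flow control, +1 = release it, 0 = leave as-is. */
static int demo_flow_decision(int qleft)
{
	if (qleft < 256)
		return -1;
	if (qleft > (DEMO_RQUEUESIZE / 2))
		return 1;
	return 0;
}

int main(void)
{
	assert(demo_queue_space(0, 0) == DEMO_RQUEUESIZE - 1);		/* empty ring */
	assert(demo_flow_decision(demo_queue_space(0, 0)) == 1);	/* plenty free: release */
	assert(demo_flow_decision(demo_queue_space(8191, 0)) == -1);	/* nearly full: enforce */
	return 0;
}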
static void jsm_carrier(struct jsm_channel *ch)
{
	struct jsm_board *bd;

	int virt_carrier = 0;
	int phys_carrier = 0;

	if (!ch)
		return;

	bd = ch->ch_bd;
	if (!bd)
		return;

	jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev, "start\n");

	if (ch->ch_mistat & UART_MSR_DCD) {
		jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
			"mistat: %x D_CD: %x\n",
			ch->ch_mistat, ch->ch_mistat & UART_MSR_DCD);
		phys_carrier = 1;
	}

	if (ch->ch_c_cflag & CLOCAL)
		virt_carrier = 1;

	jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
		"DCD: physical: %d virt: %d\n", phys_carrier, virt_carrier);

	/*
	 * Test for a VIRTUAL carrier transition to HIGH.
	 */
	if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */
		jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
			"carrier: virt DCD rose\n");

		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Test for a PHYSICAL carrier transition to HIGH.
	 */
	if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */
		jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
			"carrier: physical DCD rose\n");

		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Test for a PHYSICAL transition to low, so long as we aren't
	 * currently ignoring physical transitions (which is what "virtual
	 * carrier" indicates).
	 *
	 * The transition of the virtual carrier to low really doesn't
	 * matter... it really only means "ignore carrier state", not
	 * "make pretend that carrier is there".
	 */
	if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) &&
	    (phys_carrier == 0)) {
		/*
		 * When carrier drops:
		 *
		 * Drop carrier on all open units.
		 *
		 * Flush queues, waking up any task waiting in the
		 * line discipline.
		 *
		 * Send a hangup to the control terminal.
		 *
		 * Enable all select calls.
		 */
		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Make sure that our cached values reflect the current reality.
	 */
	if (virt_carrier == 1)
		ch->ch_flags |= CH_FCAR;
	else
		ch->ch_flags &= ~CH_FCAR;

	if (phys_carrier == 1)
		ch->ch_flags |= CH_CD;
	else
		ch->ch_flags &= ~CH_CD;
}
void jsm_input(struct jsm_channel *ch)
{
	struct jsm_board *bd;
	struct tty_struct *tp;
	u32 rmask;
	u16 head;
	u16 tail;
	int data_len;
	unsigned long lock_flags;
	int len = 0;
	int n = 0;
	int s = 0;
	int i = 0;

	if (!ch)
		return;

	jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start\n");

	tp = ch->uart_port.state->port.tty;

	bd = ch->ch_bd;
	if (!bd)
		return;

	spin_lock_irqsave(&ch->ch_lock, lock_flags);

	/*
	 * Figure the number of characters in the buffer.
	 * Exit immediately if none.
	 */
	rmask = RQUEUEMASK;

	head = ch->ch_r_head & rmask;
	tail = ch->ch_r_tail & rmask;

	data_len = (head - tail) & rmask;
	if (data_len == 0) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start\n");

	/*
	 * If the device is not open, or CREAD is off, flush
	 * input data and return immediately.
	 */
	if (!tp || !(tp->termios->c_cflag & CREAD)) {
		jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
			"input. dropping %d bytes on port %d...\n",
			data_len, ch->ch_portnum);
		ch->ch_r_head = tail;

		/* Force queue flow control to be released, if needed */
		jsm_check_queue_flow_control(ch);

		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	/*
	 * If we are throttled, simply don't read any data.
	 */
	if (ch->ch_flags & CH_STOPI) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
			"Port %d throttled, not reading any data. head: %x tail: %x\n",
			ch->ch_portnum, head, tail);
		return;
	}

	jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start 2\n");

	if (data_len <= 0) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "jsm_input 1\n");
		return;
	}

	len = tty_buffer_request_room(tp, data_len);
	n = len;

	/*
	 * n now contains the most amount of data we can copy,
	 * bounded either by the flip buffer size or the amount
	 * of data the card actually has pending...
	 */
	while (n) {
		s = ((head >= tail) ? head : RQUEUESIZE) - tail;
		s = min(s, n);

		if (s <= 0)
			break;

		/*
		 * If conditions are such that ld needs to see all
		 * UART errors, we will have to walk each character
		 * and error byte and send them to the buffer one at
		 * a time.
		 */
		if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
			for (i = 0; i < s; i++) {
				/*
				 * Give the Linux ld the flags in the
				 * format it likes.
				 */
				if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
					tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_BREAK);
				else if (*(ch->ch_equeue + tail + i) & UART_LSR_PE)
					tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_PARITY);
				else if (*(ch->ch_equeue + tail + i) & UART_LSR_FE)
					tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_FRAME);
				else
					tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_NORMAL);
			}
		} else {
			tty_insert_flip_string(tp, ch->ch_rqueue + tail, s);
		}
		tail += s;
		n -= s;
		/* Flip queue if needed */
		tail &= rmask;
	}

	ch->ch_r_tail = tail & rmask;
	ch->ch_e_tail = tail & rmask;
	jsm_check_queue_flow_control(ch);
	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

	/* Tell the tty layer it's okay to "eat" the data now */
	tty_flip_buffer_push(tp);

	jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "finish\n");
}
/*
 * jsm_tty_init()
 *
 * Init the tty subsystem. Called once per board after board has been
 * downloaded and init'ed.
 */
int __devinit jsm_tty_init(struct jsm_board *brd)
{
	int i;
	void __iomem *vaddr;
	struct jsm_channel *ch;

	if (!brd)
		return -ENXIO;

	jsm_printk(INIT, INFO, &brd->pci_dev, "start\n");

	/*
	 * Initialize board structure elements.
	 */

	brd->nasync = brd->maxports;

	/*
	 * Allocate channel memory that might not have been allocated
	 * when the driver was first loaded.
	 */
	for (i = 0; i < brd->nasync; i++) {
		if (!brd->channels[i]) {

			/*
			 * Okay to malloc with GFP_KERNEL, we are not at
			 * interrupt context, and there are no locks held.
			 */
			brd->channels[i] = kzalloc(sizeof(struct jsm_channel), GFP_KERNEL);
			if (!brd->channels[i]) {
				jsm_printk(CORE, ERR, &brd->pci_dev,
					"%s:%d Unable to allocate memory for channel struct\n",
					__FILE__, __LINE__);
			}
		}
	}

	ch = brd->channels[0];
	vaddr = brd->re_map_membase;

	/* Set up channel variables */
	for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {

		if (!brd->channels[i])
			continue;

		spin_lock_init(&ch->ch_lock);

		if (brd->bd_uart_offset == 0x200)
			ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i);

		ch->ch_bd = brd;
		ch->ch_portnum = i;

		/* .25 second delay */
		ch->ch_close_delay = 250;

		init_waitqueue_head(&ch->ch_flags_wait);
	}

	jsm_printk(INIT, INFO, &brd->pci_dev, "finish\n");
	return 0;
}
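/*
 * Illustrative sketch (not part of the driver): each Neo channel's register
 * block is assumed to sit at a fixed stride (bd_uart_offset, 0x200 in the
 * probe routine above) from the start of the remapped BAR, so channel i is
 * mapped at re_map_membase + i * 0x200. The names below are placeholders
 * for a standalone demonstration of that address arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uintptr_t demo_membase = 0xd0000000u;	/* assumed remapped base */
	const uintptr_t demo_uart_offset = 0x200;	/* per-channel register stride */
	int i;

	for (i = 0; i < 4; i++)
		printf("channel %d registers at 0x%lx\n",
		       i, (unsigned long)(demo_membase + demo_uart_offset * i));
	return 0;
}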
static int jsm_tty_open(struct uart_port *port)
{
	struct jsm_board *brd;
	struct jsm_channel *channel = (struct jsm_channel *)port;
	struct ktermios *termios;

	/* Get board pointer from our array of majors we have allocated */
	brd = channel->ch_bd;

	/*
	 * Allocate channel buffers for read/write/error.
	 * Set flag, so we don't get trounced on.
	 */
	channel->ch_flags |= (CH_OPENING);

	/* Drop locks, as malloc with GFP_KERNEL can sleep */

	if (!channel->ch_rqueue) {
		channel->ch_rqueue = kzalloc(RQUEUESIZE, GFP_KERNEL);
		if (!channel->ch_rqueue) {
			jsm_printk(INIT, ERR, &channel->ch_bd->pci_dev,
				"unable to allocate read queue buf");
			return -ENOMEM;
		}
	}
	if (!channel->ch_equeue) {
		channel->ch_equeue = kzalloc(EQUEUESIZE, GFP_KERNEL);
		if (!channel->ch_equeue) {
			jsm_printk(INIT, ERR, &channel->ch_bd->pci_dev,
				"unable to allocate error queue buf");
			return -ENOMEM;
		}
	}
	if (!channel->ch_wqueue) {
		channel->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL);
		if (!channel->ch_wqueue) {
			jsm_printk(INIT, ERR, &channel->ch_bd->pci_dev,
				"unable to allocate write queue buf");
			return -ENOMEM;
		}
	}

	channel->ch_flags &= ~(CH_OPENING);

	/*
	 * Initialize if neither terminal is open.
	 */
	jsm_printk(OPEN, INFO, &channel->ch_bd->pci_dev,
		"jsm_open: initializing channel in open...\n");

	/*
	 * Flush input queues.
	 */
	channel->ch_r_head = channel->ch_r_tail = 0;
	channel->ch_e_head = channel->ch_e_tail = 0;
	channel->ch_w_head = channel->ch_w_tail = 0;

	brd->bd_ops->flush_uart_write(channel);
	brd->bd_ops->flush_uart_read(channel);

	channel->ch_flags = 0;
	channel->ch_cached_lsr = 0;
	channel->ch_stops_sent = 0;

	termios = port->state->port.tty->termios;
	channel->ch_c_cflag	= termios->c_cflag;
	channel->ch_c_iflag	= termios->c_iflag;
	channel->ch_c_oflag	= termios->c_oflag;
	channel->ch_c_lflag	= termios->c_lflag;
	channel->ch_startc	= termios->c_cc[VSTART];
	channel->ch_stopc	= termios->c_cc[VSTOP];

	/* Tell UART to init itself */
	brd->bd_ops->uart_init(channel);

	/*
	 * Run param in case we changed anything
	 */
	brd->bd_ops->param(channel);

	jsm_carrier(channel);

	channel->ch_open_count++;

	jsm_printk(OPEN, INFO, &channel->ch_bd->pci_dev, "finish\n");
	return 0;
}
static inline void neo_parse_lsr(struct jsm_board *brd, u32 port)
{
	struct jsm_channel *ch;
	int linestatus;
	unsigned long lock_flags;

	if (!brd)
		return;

	if (port >= brd->maxports)
		return;

	ch = brd->channels[port];
	if (!ch)
		return;

	linestatus = readb(&ch->ch_neo_uart->lsr);

	jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev,
		"%s:%d port: %d linestatus: %x\n", __FILE__, __LINE__, port, linestatus);

	ch->ch_cached_lsr |= linestatus;

	if (ch->ch_cached_lsr & UART_LSR_DR) {
		/* Read data from uart -> queue */
		neo_copy_data_from_uart_to_queue(ch);
		spin_lock_irqsave(&ch->ch_lock, lock_flags);
		jsm_check_queue_flow_control(ch);
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
	}

	/*
	 * This is a special flag. It indicates that at least 1
	 * RX error (parity, framing, or break) has happened.
	 * Mark this in our struct, which will tell us that we have
	 * to do the special RX+LSR read for this FIFO load.
	 */
	if (linestatus & UART_17158_RX_FIFO_DATA_ERROR)
		jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev,
			"%s:%d Port: %d Got an RX error, need to parse LSR\n",
			__FILE__, __LINE__, port);

	/*
	 * The next 3 tests should *NOT* happen, as the above test
	 * should encapsulate all 3... At least, that's what Exar says.
	 */
	if (linestatus & UART_LSR_PE) {
		ch->ch_err_parity++;
		jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev,
			"%s:%d Port: %d. PAR ERR!\n", __FILE__, __LINE__, port);
	}

	if (linestatus & UART_LSR_FE) {
		ch->ch_err_frame++;
		jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev,
			"%s:%d Port: %d. FRM ERR!\n", __FILE__, __LINE__, port);
	}

	if (linestatus & UART_LSR_BI) {
		ch->ch_err_break++;
		jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev,
			"%s:%d Port: %d. BRK INTR!\n", __FILE__, __LINE__, port);
	}

	if (linestatus & UART_LSR_OE) {
		/*
		 * Rx overruns. Exar says that an overrun will NOT corrupt
		 * the FIFO. It will just replace the holding register
		 * with this new data byte. So basically just ignore this.
		 * Probably we should eventually have an overrun stat in our driver...
		 */
		ch->ch_err_overrun++;
		jsm_printk(INTR, DEBUG, &ch->ch_bd->pci_dev,
			"%s:%d Port: %d. Rx Overrun!\n", __FILE__, __LINE__, port);
	}

	if (linestatus & UART_LSR_THRE) {
		spin_lock_irqsave(&ch->ch_lock, lock_flags);
		ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

		/* Transfer data (if any) from Write Queue -> UART. */
		neo_copy_data_from_queue_to_uart(ch);
	} else if (linestatus & UART_17158_TX_AND_FIFO_CLR) {
		spin_lock_irqsave(&ch->ch_lock, lock_flags);
		ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

		/* Transfer data (if any) from Write Queue -> UART. */
		neo_copy_data_from_queue_to_uart(ch);
	}
}
/*
 * Parse the ISR register.
 */
static inline void neo_parse_isr(struct jsm_board *brd, u32 port)
{
	struct jsm_channel *ch;
	u8 isr;
	u8 cause;
	unsigned long lock_flags;

	if (!brd)
		return;

	if (port >= brd->maxports)
		return;

	ch = brd->channels[port];
	if (!ch)
		return;

	/* Here we try to figure out what caused the interrupt to happen */
	while (1) {

		isr = readb(&ch->ch_neo_uart->isr_fcr);

		/* Bail if no pending interrupt */
		if (isr & UART_IIR_NO_INT)
			break;

		/*
		 * Yank off the upper 2 bits, which just show that the FIFOs are enabled.
		 */
		isr &= ~(UART_17158_IIR_FIFO_ENABLED);

		jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev,
			"%s:%d isr: %x\n", __FILE__, __LINE__, isr);

		if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) {
			/* Read data from uart -> queue */
			neo_copy_data_from_uart_to_queue(ch);

			/* Call our tty layer to enforce queue flow control if needed. */
			spin_lock_irqsave(&ch->ch_lock, lock_flags);
			jsm_check_queue_flow_control(ch);
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		}

		if (isr & UART_IIR_THRI) {
			/* Transfer data (if any) from Write Queue -> UART. */
			spin_lock_irqsave(&ch->ch_lock, lock_flags);
			ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
			neo_copy_data_from_queue_to_uart(ch);
		}

		if (isr & UART_17158_IIR_XONXOFF) {
			cause = readb(&ch->ch_neo_uart->xoffchar1);

			jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev,
				"Port %d. Got ISR_XONXOFF: cause:%x\n", port, cause);

			/*
			 * Since the UART detected either an XON or
			 * XOFF match, we need to figure out which
			 * one it was, so we can suspend or resume data flow.
			 */
			spin_lock_irqsave(&ch->ch_lock, lock_flags);
			if (cause == UART_17158_XON_DETECT) {
				/* Is output stopped right now, if so, resume it */
				if (brd->channels[port]->ch_flags & CH_STOP) {
					ch->ch_flags &= ~(CH_STOP);
				}
				jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev,
					"Port %d. XON detected in incoming data\n", port);
			} else if (cause == UART_17158_XOFF_DETECT) {
				if (!(brd->channels[port]->ch_flags & CH_STOP)) {
					ch->ch_flags |= CH_STOP;
					jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev,
						"Setting CH_STOP\n");
				}
				jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev,
					"Port: %d. XOFF detected in incoming data\n", port);
			}
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		}

		if (isr & UART_17158_IIR_HWFLOW_STATE_CHANGE) {
			/*
			 * If we get here, this means the hardware is doing auto flow control.
			 * Check to see whether RTS/DTR or CTS/DSR caused this interrupt.
			 */
			cause = readb(&ch->ch_neo_uart->mcr);

			/* Which pin is doing auto flow? RTS or DTR? */
			spin_lock_irqsave(&ch->ch_lock, lock_flags);
			if ((cause & 0x4) == 0) {
				if (cause & UART_MCR_RTS)
					ch->ch_mostat |= UART_MCR_RTS;
				else
					ch->ch_mostat &= ~(UART_MCR_RTS);
			} else {
				if (cause & UART_MCR_DTR)
					ch->ch_mostat |= UART_MCR_DTR;
				else
					ch->ch_mostat &= ~(UART_MCR_DTR);
			}
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		}

		/* Parse any modem signal changes */
		jsm_printk(INTR, INFO, &ch->ch_bd->pci_dev,
			"MOD_STAT: sending to parse_modem_sigs\n");
		neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
	}
}
static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch)
{
	u16 head;
	u16 tail;
	int n;
	int s;
	int qlen;
	u32 len_written = 0;

	if (!ch)
		return;

	/* No data to write to the UART */
	if (ch->ch_w_tail == ch->ch_w_head)
		return;

	/* If port is "stopped", don't send any data to the UART */
	if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_BREAK_SENDING))
		return;

	/*
	 * If FIFOs are disabled, send data directly to the txrx register.
	 */
	if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
		u8 lsrbits = readb(&ch->ch_neo_uart->lsr);

		ch->ch_cached_lsr |= lsrbits;
		if (ch->ch_cached_lsr & UART_LSR_THRE) {
			ch->ch_cached_lsr &= ~(UART_LSR_THRE);
			writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_neo_uart->txrx);
			jsm_printk(WRITE, INFO, &ch->ch_bd->pci_dev,
				"Tx data: %x\n", ch->ch_wqueue[ch->ch_w_tail]);
			ch->ch_w_tail++;
			ch->ch_w_tail &= WQUEUEMASK;
			ch->ch_txcount++;
		}
		return;
	}

	/*
	 * We have to do it this way, because of the EXAR TXFIFO count bug.
	 */
	if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM)))
		return;

	n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel;

	/* cache head and tail of queue */
	head = ch->ch_w_head & WQUEUEMASK;
	tail = ch->ch_w_tail & WQUEUEMASK;
	qlen = (head - tail) & WQUEUEMASK;

	/* Find minimum of the FIFO space, versus queue length */
	n = min(n, qlen);

	while (n > 0) {

		s = ((head >= tail) ? head : WQUEUESIZE) - tail;
		s = min(s, n);

		if (s <= 0)
			break;

		memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s);

		/* Add and flip queue if needed */
		tail = (tail + s) & WQUEUEMASK;
		n -= s;
		ch->ch_txcount += s;
		len_written += s;
	}

	/* Update the final tail */
	ch->ch_w_tail = tail & WQUEUEMASK;

	if (len_written >= ch->ch_t_tlevel)
		ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);

	if (!jsm_tty_write(&ch->uart_port))
		uart_write_wakeup(&ch->uart_port);
}
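/*
 * Illustrative sketch (not part of the driver): the copy loop above moves the
 * queued bytes to the FIFO in at most two contiguous runs, because the ring
 * buffer may wrap around its end. The helper below reproduces that "length of
 * the contiguous run starting at tail" calculation with placeholder names and
 * an assumed queue size.
 */
#include <assert.h>

#define DEMO_WQUEUESIZE 8192			/* assumed queue size, power of two */
#define DEMO_WQUEUEMASK (DEMO_WQUEUESIZE - 1)

/* Bytes that can be copied from 'tail' without wrapping past the buffer end. */
static int demo_contiguous_run(unsigned short head, unsigned short tail)
{
	return ((head >= tail) ? head : DEMO_WQUEUESIZE) - tail;
}

int main(void)
{
	/* No wrap: everything between tail and head is one run. */
	assert(demo_contiguous_run(100, 40) == 60);
	/* Wrapped: the first run stops at the end of the buffer. */
	assert(demo_contiguous_run(10, DEMO_WQUEUESIZE - 5) == 5);
	return 0;
}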
static void neo_copy_data_from_uart_to_queue(struct jsm_channel *ch)
{
	int qleft = 0;
	u8 linestatus = 0;
	u8 error_mask = 0;
	int n = 0;
	int total = 0;
	u16 head;
	u16 tail;

	if (!ch)
		return;

	/* cache head and tail of queue */
	head = ch->ch_r_head & RQUEUEMASK;
	tail = ch->ch_r_tail & RQUEUEMASK;

	/* Get our cached LSR */
	linestatus = ch->ch_cached_lsr;
	ch->ch_cached_lsr = 0;

	/* Store how much space we have left in the queue */
	if ((qleft = tail - head - 1) < 0)
		qleft += RQUEUEMASK + 1;

	/*
	 * If the UART is not in FIFO mode, force the FIFO copy to
	 * NOT be run, by setting total to 0.
	 *
	 * On the other hand, if the UART IS in FIFO mode, then ask
	 * the UART to give us an approximation of data it has RX'ed.
	 */
	if (!(ch->ch_flags & CH_FIFO_ENABLED))
		total = 0;
	else {
		total = readb(&ch->ch_neo_uart->rfifo);

		/*
		 * EXAR chip bug - RX FIFO COUNT - Fudge factor.
		 *
		 * This resolves a problem/bug with the Exar chip that sometimes
		 * returns a bogus value in the rfifo register.
		 * The count can be anywhere from 0-3 bytes "off".
		 * Bizarre, but true.
		 */
		total -= 3;
	}

	/*
	 * Finally, bound the copy to make sure we don't overflow
	 * our own queue...
	 * The byte-by-byte copy loop below this one will deal
	 * with the queue overflow possibility.
	 */
	total = min(total, qleft);

	while (total > 0) {

		/*
		 * Grab the linestatus register, we need to check
		 * to see if there are any errors in the FIFO.
		 */
		linestatus = readb(&ch->ch_neo_uart->lsr);

		/*
		 * Break out if there is a FIFO error somewhere.
		 * This will allow us to go byte by byte down below,
		 * finding the exact location of the error.
		 */
		if (linestatus & UART_17158_RX_FIFO_DATA_ERROR)
			break;

		/* Make sure we don't go over the end of our queue */
		n = min(((u32) total), (RQUEUESIZE - (u32) head));

		/*
		 * Cut down n even further if needed, this is to fix
		 * a problem with memcpy_fromio() with the Neo on the
		 * IBM pSeries platform.
		 * 15 bytes max appears to be the magic number.
		 */
		n = min((u32) n, (u32) 12);

		/*
		 * Since we are grabbing the linestatus register, which
		 * will reset some bits after our read, we need to ensure
		 * we don't miss our TX FIFO empties.
		 */
		if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR))
			ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);

		linestatus = 0;

		/* Copy data from uart to the queue */
		memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n);

		/*
		 * Since RX_FIFO_DATA_ERROR was 0, we are guaranteed
		 * that all the data currently in the FIFO is free of
		 * breaks and parity/frame/overrun errors.
		 */
		memset(ch->ch_equeue + head, 0, n);

		/* Add to and flip head if needed */
		head = (head + n) & RQUEUEMASK;
		total -= n;
		qleft -= n;
		ch->ch_rxcount += n;
	}

	/*
	 * Create a mask to determine whether we should
	 * insert the character (if any) into our queue.
	 */
	if (ch->ch_c_iflag & IGNBRK)
		error_mask |= UART_LSR_BI;

	/*
	 * Now cleanup any leftover bytes still in the UART.
	 * Also deal with any possible queue overflow here as well.
	 */
	while (1) {

		/*
		 * It's possible we have a linestatus from the loop above
		 * this, so we "OR" on any extra bits.
		 */
		linestatus |= readb(&ch->ch_neo_uart->lsr);

		/*
		 * If the chip tells us there is no more data pending to
		 * be read, we can then leave.
		 * But before we do, cache the linestatus, just in case.
		 */
		if (!(linestatus & UART_LSR_DR)) {
			ch->ch_cached_lsr = linestatus;
			break;
		}

		/* No need to store this bit */
		linestatus &= ~UART_LSR_DR;

		/*
		 * Since we are grabbing the linestatus register, which
		 * will reset some bits after our read, we need to ensure
		 * we don't miss our TX FIFO empties.
		 */
		if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) {
			linestatus &= ~(UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR);
			ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
		}

		/*
		 * Discard character if we are ignoring the error mask.
		 */
		if (linestatus & error_mask) {
			u8 discard;
			linestatus = 0;
			memcpy_fromio(&discard, &ch->ch_neo_uart->txrxburst, 1);
			continue;
		}

		/*
		 * If our queue is full, we have no choice but to drop some data.
		 * The assumption is that HWFLOW or SWFLOW should have stopped
		 * things way, way before we got to this point.
		 *
		 * I decided that I wanted to ditch the oldest data first;
		 * I hope that's okay with everyone? Yes? Good.
		 */
		while (qleft < 1) {
			jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
				"Queue full, dropping DATA:%x LSR:%x\n",
				ch->ch_rqueue[tail], ch->ch_equeue[tail]);

			ch->ch_r_tail = tail = (tail + 1) & RQUEUEMASK;
			ch->ch_err_overrun++;
			qleft++;
		}

		memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1);
		ch->ch_equeue[head] = (u8) linestatus;

		jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
			"DATA/LSR pair: %x %x\n", ch->ch_rqueue[head], ch->ch_equeue[head]);

		/* Ditch any remaining linestatus value. */
		linestatus = 0;

		/* Add to and flip head if needed */
		head = (head + 1) & RQUEUEMASK;
		qleft--;
		ch->ch_rxcount++;
	}

	/*
	 * Write new final heads to channel structure.
	 */
	ch->ch_r_head = head & RQUEUEMASK;
	ch->ch_e_head = head & EQUEUEMASK;

	jsm_input(ch);
}
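/*
 * Illustrative sketch (not part of the driver): the byte-by-byte loop above
 * stores each data byte in ch_rqueue[head] and the raw LSR value for that
 * byte in ch_equeue[head]; jsm_input() later turns those LSR bits into tty
 * flags. The standalone demo below reproduces that mapping, with the LSR bit
 * values assumed from the standard 16550-style register layout and the flag
 * names replaced by placeholders.
 */
#include <stdio.h>

#define DEMO_LSR_PE 0x04	/* parity error */
#define DEMO_LSR_FE 0x08	/* framing error */
#define DEMO_LSR_BI 0x10	/* break interrupt */

enum demo_tty_flag { DEMO_TTY_NORMAL, DEMO_TTY_BREAK, DEMO_TTY_FRAME, DEMO_TTY_PARITY };

/* Same precedence as jsm_input(): break, then parity, then framing. */
static enum demo_tty_flag demo_lsr_to_flag(unsigned char lsr)
{
	if (lsr & DEMO_LSR_BI)
		return DEMO_TTY_BREAK;
	if (lsr & DEMO_LSR_PE)
		return DEMO_TTY_PARITY;
	if (lsr & DEMO_LSR_FE)
		return DEMO_TTY_FRAME;
	return DEMO_TTY_NORMAL;
}

int main(void)
{
	printf("lsr 0x10 -> %d (break)\n", demo_lsr_to_flag(0x10));
	printf("lsr 0x00 -> %d (normal)\n", demo_lsr_to_flag(0x00));
	return 0;
}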
/*
 * neo_param()
 * Send any/all changes to the line to the UART.
 */
static void neo_param(struct jsm_channel *ch)
{
	u8 lcr = 0;
	u8 uart_lcr = 0;
	u8 ier = 0;
	u32 baud = 9600;
	int quot = 0;
	struct jsm_board *bd;

	bd = ch->ch_bd;
	if (!bd)
		return;

	/*
	 * If baud rate is zero, flush queues, and set mval to drop DTR.
	 */
	if ((ch->ch_c_cflag & (CBAUD)) == 0) {
		ch->ch_r_head = ch->ch_r_tail = 0;
		ch->ch_e_head = ch->ch_e_tail = 0;
		ch->ch_w_head = ch->ch_w_tail = 0;

		neo_flush_uart_write(ch);
		neo_flush_uart_read(ch);

		ch->ch_flags |= (CH_BAUD0);
		ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR);
		neo_assert_modem_signals(ch);
		ch->ch_old_baud = 0;
		return;

	} else if (ch->ch_custom_speed) {
		baud = ch->ch_custom_speed;
		if (ch->ch_flags & CH_BAUD0)
			ch->ch_flags &= ~(CH_BAUD0);
	} else {
		int iindex = 0;
		int jindex = 0;

		const u64 bauds[4][16] = {
			{ 0,      50,     75,     110,
			  134,    150,    200,    300,
			  600,    1200,   1800,   2400,
			  4800,   9600,   19200,  38400 },
			{ 0,      57600,  115200, 230400,
			  460800, 150,    200,    921600,
			  600,    1200,   1800,   2400,
			  4800,   9600,   19200,  38400 },
			{ 0,      57600,  76800,  115200,
			  131657, 153600, 230400, 460800,
			  921600, 1200,   1800,   2400,
			  4800,   9600,   19200,  38400 },
			{ 0,      57600,  115200, 230400,
			  460800, 150,    200,    921600,
			  600,    1200,   1800,   2400,
			  4800,   9600,   19200,  38400 }
		};

		baud = C_BAUD(ch->uart_port.info->tty) & 0xff;

		if (ch->ch_c_cflag & CBAUDEX)
			iindex = 1;

		jindex = baud;

		if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16))
			baud = bauds[iindex][jindex];
		else {
			jsm_printk(IOCTL, DEBUG, &ch->ch_bd->pci_dev,
				"baud indices were out of range (%d)(%d)",
				iindex, jindex);
			baud = 0;
		}

		if (baud == 0)
			baud = 9600;

		if (ch->ch_flags & CH_BAUD0)
			ch->ch_flags &= ~(CH_BAUD0);
	}

	if (ch->ch_c_cflag & PARENB)
		lcr |= UART_LCR_PARITY;

	if (!(ch->ch_c_cflag & PARODD))
		lcr |= UART_LCR_EPAR;

	/*
	 * Not all platforms support mark/space parity,
	 * so this will hide behind an ifdef.
	 */
#ifdef CMSPAR
	if (ch->ch_c_cflag & CMSPAR)
		lcr |= UART_LCR_SPAR;
#endif

	if (ch->ch_c_cflag & CSTOPB)
		lcr |= UART_LCR_STOP;

	switch (ch->ch_c_cflag & CSIZE) {
	case CS5:
		lcr |= UART_LCR_WLEN5;
		break;
	case CS6:
		lcr |= UART_LCR_WLEN6;
		break;
	case CS7:
		lcr |= UART_LCR_WLEN7;
		break;
	case CS8:
	default:
		lcr |= UART_LCR_WLEN8;
		break;
	}

	ier = readb(&ch->ch_neo_uart->ier);
	uart_lcr = readb(&ch->ch_neo_uart->lcr);

	if (baud == 0)
		baud = 9600;

	quot = ch->ch_bd->bd_dividend / baud;

	if (quot != 0) {
		ch->ch_old_baud = baud;
		writeb(UART_LCR_DLAB, &ch->ch_neo_uart->lcr);
		writeb((quot & 0xff), &ch->ch_neo_uart->txrx);
		writeb((quot >> 8), &ch->ch_neo_uart->ier);
		writeb(lcr, &ch->ch_neo_uart->lcr);
	}
void jsm_input(struct jsm_channel *ch)
{
	struct jsm_board *bd;
	struct tty_struct *tp;
	u32 rmask;
	u16 head;
	u16 tail;
	int data_len;
	unsigned long lock_flags;
	int flip_len;
	int len = 0;
	int n = 0;
	char *buf = NULL;
	char *buf2 = NULL;
	int s = 0;
	int i = 0;

	if (!ch)
		return;

	jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start\n");

	tp = ch->uart_port.info->tty;

	bd = ch->ch_bd;
	if (!bd)
		return;

	spin_lock_irqsave(&ch->ch_lock, lock_flags);

	/*
	 * Figure the number of characters in the buffer.
	 * Exit immediately if none.
	 */
	rmask = RQUEUEMASK;

	head = ch->ch_r_head & rmask;
	tail = ch->ch_r_tail & rmask;

	data_len = (head - tail) & rmask;
	if (data_len == 0) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start\n");

	/*
	 * If the device is not open, or CREAD is off, flush
	 * input data and return immediately.
	 */
	if (!tp || !(tp->termios->c_cflag & CREAD)) {
		jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
			"input. dropping %d bytes on port %d...\n",
			data_len, ch->ch_portnum);
		ch->ch_r_head = tail;

		/* Force queue flow control to be released, if needed */
		jsm_check_queue_flow_control(ch);

		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	/*
	 * If we are throttled, simply don't read any data.
	 */
	if (ch->ch_flags & CH_STOPI) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
			"Port %d throttled, not reading any data. head: %x tail: %x\n",
			ch->ch_portnum, head, tail);
		return;
	}

	jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start 2\n");

	/*
	 * If the rxbuf is empty and we are not throttled, put as much
	 * as we can directly into the linux TTY flip buffer.
	 * The jsm_rawreadok case takes advantage of carnal knowledge that
	 * the char_buf and the flag_buf are next to each other and
	 * are each of (2 * TTY_FLIPBUF_SIZE) size.
	 *
	 * NOTE: if (!tty->real_raw), the call to ldisc.receive_buf
	 * actually still uses the flag buffer, so you can't
	 * use it for input data.
	 */
	if (jsm_rawreadok) {
		if (tp->real_raw)
			flip_len = MYFLIPLEN;
		else
			flip_len = 2 * TTY_FLIPBUF_SIZE;
	} else
		flip_len = TTY_FLIPBUF_SIZE - tp->flip.count;

	len = min(data_len, flip_len);
	len = min(len, (N_TTY_BUF_SIZE - 1) - tp->read_cnt);

	if (len <= 0) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "jsm_input 1\n");
		return;
	}

	/*
	 * If we're bypassing flip buffers on rx, we can blast it
	 * right into the beginning of the buffer.
	 */
	if (jsm_rawreadok) {
		if (tp->real_raw) {
			if (ch->ch_flags & CH_FLIPBUF_IN_USE) {
				jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
					"JSM - FLIPBUF in use. delaying input\n");
				spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
				return;
			}
			ch->ch_flags |= CH_FLIPBUF_IN_USE;
			buf = ch->ch_bd->flipbuf;
			buf2 = NULL;
		} else {
			buf = tp->flip.char_buf;
			buf2 = tp->flip.flag_buf;
		}
	} else {
		buf = tp->flip.char_buf_ptr;
		buf2 = tp->flip.flag_buf_ptr;
	}

	n = len;

	/*
	 * n now contains the most amount of data we can copy,
	 * bounded either by the flip buffer size or the amount
	 * of data the card actually has pending...
	 */
	while (n) {
		s = ((head >= tail) ? head : RQUEUESIZE) - tail;
		s = min(s, n);

		if (s <= 0)
			break;

		memcpy(buf, ch->ch_rqueue + tail, s);

		/* buf2 is only set when port isn't raw */
		if (buf2)
			memcpy(buf2, ch->ch_equeue + tail, s);

		tail += s;
		buf += s;
		if (buf2)
			buf2 += s;
		n -= s;
		/* Flip queue if needed */
		tail &= rmask;
	}

	/*
	 * In high performance mode, we don't have to update
	 * flag_buf or any of the counts or pointers into flip buf.
	 */
	if (!jsm_rawreadok) {
		if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
			for (i = 0; i < len; i++) {
				/*
				 * Give the Linux ld the flags in the
				 * format it likes.
				 */
				if (tp->flip.flag_buf_ptr[i] & UART_LSR_BI)
					tp->flip.flag_buf_ptr[i] = TTY_BREAK;
				else if (tp->flip.flag_buf_ptr[i] & UART_LSR_PE)
					tp->flip.flag_buf_ptr[i] = TTY_PARITY;
				else if (tp->flip.flag_buf_ptr[i] & UART_LSR_FE)
					tp->flip.flag_buf_ptr[i] = TTY_FRAME;
				else
					tp->flip.flag_buf_ptr[i] = TTY_NORMAL;
			}
		} else {
			memset(tp->flip.flag_buf_ptr, 0, len);
		}

		tp->flip.char_buf_ptr += len;
		tp->flip.flag_buf_ptr += len;
		tp->flip.count += len;
	} else if (!tp->real_raw) {
		if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
			for (i = 0; i < len; i++) {
				/*
				 * Give the Linux ld the flags in the
				 * format it likes.
				 */
				if (tp->flip.flag_buf_ptr[i] & UART_LSR_BI)
					tp->flip.flag_buf_ptr[i] = TTY_BREAK;
				else if (tp->flip.flag_buf_ptr[i] & UART_LSR_PE)
					tp->flip.flag_buf_ptr[i] = TTY_PARITY;
				else if (tp->flip.flag_buf_ptr[i] & UART_LSR_FE)
					tp->flip.flag_buf_ptr[i] = TTY_FRAME;
				else
					tp->flip.flag_buf_ptr[i] = TTY_NORMAL;
			}
		} else
			memset(tp->flip.flag_buf, 0, len);
	}

	/*
	 * If we're doing raw reads, jam it right into the
	 * line disc bypassing the flip buffers.
	 */
	if (jsm_rawreadok) {
		if (tp->real_raw) {
			ch->ch_r_tail = tail & rmask;
			ch->ch_e_tail = tail & rmask;

			jsm_check_queue_flow_control(ch);

			/* !!! WE *MUST* LET GO OF ALL LOCKS BEFORE CALLING RECEIVE BUF !!! */
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

			jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
				"jsm_input. %d real_raw len:%d calling receive_buf for board %d\n",
				__LINE__, len, ch->ch_bd->boardnum);
			tp->ldisc.receive_buf(tp, ch->ch_bd->flipbuf, NULL, len);

			/* Allow use of channel flip buffer again */
			spin_lock_irqsave(&ch->ch_lock, lock_flags);
			ch->ch_flags &= ~CH_FLIPBUF_IN_USE;
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

		} else {
			ch->ch_r_tail = tail & rmask;
			ch->ch_e_tail = tail & rmask;

			jsm_check_queue_flow_control(ch);

			/* !!! WE *MUST* LET GO OF ALL LOCKS BEFORE CALLING RECEIVE BUF !!! */
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

			jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
				"jsm_input. %d not real_raw len:%d calling receive_buf for board %d\n",
				__LINE__, len, ch->ch_bd->boardnum);
			tp->ldisc.receive_buf(tp, tp->flip.char_buf, tp->flip.flag_buf, len);
		}
	} else {
		ch->ch_r_tail = tail & rmask;
		ch->ch_e_tail = tail & rmask;

		jsm_check_queue_flow_control(ch);

		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

		jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
			"jsm_input. %d not jsm_read raw okay scheduling flip\n",
			__LINE__);
		tty_schedule_flip(tp);
	}

	jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "finish\n");
}